diff --git a/controller-client/draw.lua b/controller-client/draw.lua
new file mode 100644
index 0000000..5f248e3
--- /dev/null
+++ b/controller-client/draw.lua
@@ -0,0 +1,43 @@
+
+local function getTextY(line)
+	return 15 + 25 * line
+end
+
+function love.draw2()
+	-- Set background
+	love.graphics.setBackgroundColor(0, 0, 0)
+	love.graphics.setColor(1, 1, 1)
+
+	if BotState.camfeed then
+		love.graphics.draw(BotState.camfeed)
+	end
+
+	-- Draw time
+	local time
+	if BotState.lastMessage == 0 then
+		time = "Never"
+	else
+		time = math.floor(love.timer.getTime() - BotState.lastMessage) .. "s ago"
+	end
+	love.graphics.print("Last message received: " .. time, 5, 5)
+
+	-- Draw CPU battery, in red when the corrected reading is missing or low
+	if BotState.cpuBatteryCorrected == nil or BotState.cpuBatteryCorrected <= 3 then
+		love.graphics.setColor(1, 0, 0)
+	else
+		love.graphics.setColor(1, 1, 1)
+	end
+	love.graphics.print("CPU Batt: " .. formatSafe("%.02f (%.02f) V", BotState.cpuBattery, BotState.cpuBatteryCorrected), 5, getTextY(1))
+
+	-- Draw servo battery
+	if BotState.servoBatteryCorrected == nil or BotState.servoBatteryCorrected <= 3 then
+		love.graphics.setColor(1, 0, 0)
+	else
+		love.graphics.setColor(1, 1, 1)
+	end
+	love.graphics.print("Servo Batt: " .. formatSafe("%.02f (%.02f) V", BotState.servoBattery, BotState.servoBatteryCorrected), 5, getTextY(2))
+
+	-- Draw latency
+	love.graphics.setColor(1, 1, 1)
+	love.graphics.print("Latency: " .. Ping.latency, 5, getTextY(3))
+end
diff --git a/controller-client/main.lua b/controller-client/main.lua
index 6aee7f1..e56432a 100644
--- a/controller-client/main.lua
+++ b/controller-client/main.lua
@@ -1,45 +1,70 @@
-local lastMessage = 0
+package.loaded["draw"] = nil
 
-local botState = {
+require("draw")
+
+BotState = {
+	lastMessage = 0,
 	cpuBattery = nil,
+	cpuBatteryCorrected = nil,
 	servoBattery = nil,
+	servoBatteryCorrected = nil,
+	camfeed = nil,
 }
 
-function love.draw2()
-	love.graphics.setBackgroundColor(0, 0, 0)
+Ping = {
+	timeSent = 0,
+	latency = "unknown",
+	payload = nil,
+}
 
-	local time
-	if lastMessage == 0 then
-		time = "Never"
-	else
-		time = math.floor(love.timer.getTime() - lastMessage) .. "s ago"
+function love.update2()
+	local now = love.timer.getTime()
+	if now - Ping.timeSent > 5 then
+		Ping.payload = ""
+		for i = 0, 10 do
+			Ping.payload = Ping.payload .. string.char(love.math.random(65, 90)) -- inclusive ASCII range 'A'..'Z'
+		end
+		Ping.timeSent = now
+		love.mqtt.send("command/ping", Ping.payload)
+		print("Sending ping")
 	end
-	love.graphics.print("Last message received: " .. time, 5, 5)
-
-	love.graphics.print("CPU Batt: " .. formatSafe("%.02f V", botState.cpuBattery), 5, 30)
-	love.graphics.print("Servo Batt: " .. formatSafe("%.02f V", botState.servoBattery), 5, 45)
 end
 
-function formatSafe(format, value)
+function formatSafe(format, value, ...)
 	if value == nil then
 		return "unknown"
 	end
-	return string.format(format, value)
+	return string.format(format, value, ...)
 end
 
 function love.load()
-	love.graphics.setFont(love.graphics.newFont(15))
+	love.graphics.setFont(love.graphics.newFont(20))
 	love.window.setFullscreen(true)
 	love.mqtt.subscribe("telemetry/#")
 end
 
 function love.mqtt.message(topic, payload)
+	local oldTime = BotState.lastMessage
+	BotState.lastMessage = love.timer.getTime()
+
 	if topic == "telemetry/cpu_battery" then
-		botState.cpuBattery = tonumber(payload)
-		lastMessage = love.timer.getTime()
+		BotState.cpuBattery = tonumber(payload)
+		BotState.cpuBatteryCorrected = BotState.cpuBattery / 2
 	elseif topic == "telemetry/servo_battery" then
-		botState.servoBattery = tonumber(payload)
-		lastMessage = love.timer.getTime()
+		BotState.servoBattery = tonumber(payload)
+		BotState.servoBatteryCorrected = BotState.servoBattery / 2
+	elseif topic == "telemetry/camfeed" then
+		print("Got camfeed")
+		local fileData = love.filesystem.newFileData(payload, "camfeed")
+		BotState.camfeed = love.graphics.newImage(fileData)
+	elseif topic == "telemetry/pong" then
+		if payload == Ping.payload then
+			local timeReceived = love.timer.getTime()
+			Ping.latency = math.floor((timeReceived - Ping.timeSent) * 1000) .. "ms"
+		end
+	else
+		print("Got unknown telemetry at " .. topic)
+		BotState.lastMessage = oldTime
 	end
 end
diff --git a/controller-host/mqttthread.lua b/controller-host/mqttthread.lua
index f0fd80a..1f8b771 100644
--- a/controller-host/mqttthread.lua
+++ b/controller-host/mqttthread.lua
@@ -38,7 +38,7 @@ local function onCommand(command)
 		assert(client:subscribe {
 			topic = topic
 		})
-		print("Subribed to " .. topic)
+		print("Subscribed to " .. topic)
 	end
 end
 
diff --git a/docker-compose.yml b/docker-compose.yml
index c8fba9a..63303ad 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -6,6 +6,34 @@ services:
     environment:
       SPIDER_HOSTNAME: spider
 
+  spider-cam:
+    build: spider-cam
+    restart: unless-stopped
+    privileged: true
+    devices:
+      - /dev/v4l-subdev0
+      - /dev/dma_heap
+      - /dev/video0
+      - /dev/video10
+      - /dev/video11
+      - /dev/video12
+      - /dev/video13
+      - /dev/video14
+      - /dev/video15
+      - /dev/video16
+      - /dev/video18
+      - /dev/video19
+      - /dev/video20
+      - /dev/video21
+      - /dev/video22
+      - /dev/video23
+      - /dev/video31
+      - /dev/media0
+      - /dev/media1
+      - /dev/media2
+      - /dev/media3
+      - /dev/media4
+
   spider-host:
     build: spider-host
     restart: unless-stopped
diff --git a/spider-cam/Dockerfile b/spider-cam/Dockerfile
new file mode 100644
index 0000000..eb2cbbb
--- /dev/null
+++ b/spider-cam/Dockerfile
@@ -0,0 +1,16 @@
+FROM alpine:3.20.1
+
+RUN apk add --no-cache git meson alpine-sdk cmake linux-headers python3 python3-dev \
+	py3-yaml py3-jinja2 py3-ply py3-pybind11 py3-pybind11-dev py3-paho-mqtt
+#RUN apk add --no-cache libcamera libcamera-tools libcamera-v4l2 python3 python3-dev \
+#	cython py3-setuptools alpine-sdk ffmpeg ffmpeg-dev
+
+WORKDIR /libcamera
+ADD libcamera /libcamera
+RUN meson setup --prefix /usr build && ninja -C build install
+
+WORKDIR /app
+COPY mfb.py /app
+COPY spider-cam.py /app
+
+CMD ["python3", "spider-cam.py"]
\ No newline at end of file
diff --git a/spider-cam/libcamera/.clang-format b/spider-cam/libcamera/.clang-format
new file mode 100644
index 0000000..f2b44e8
--- /dev/null
+++ b/spider-cam/libcamera/.clang-format
@@ -0,0 +1,168 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# clang-format configuration file. Intended for clang-format >= 12.
+#
+# For more information, see:
+#
+#   Documentation/process/clang-format.rst
+#   https://clang.llvm.org/docs/ClangFormat.html
+#   https://clang.llvm.org/docs/ClangFormatStyleOptions.html
+#
+---
+Language: Cpp
+AccessModifierOffset: -8
+AlignAfterOpenBracket: Align
+AlignConsecutiveAssignments: false
+AlignConsecutiveDeclarations: false
+AlignEscapedNewlines: Right
+AlignOperands: true
+AlignTrailingComments: false
+AllowAllParametersOfDeclarationOnNextLine: false
+AllowShortBlocksOnASingleLine: false
+AllowShortCaseLabelsOnASingleLine: false
+AllowShortFunctionsOnASingleLine: InlineOnly
+AllowShortIfStatementsOnASingleLine: false
+AllowShortLoopsOnASingleLine: false
+AlwaysBreakAfterDefinitionReturnType: None
+AlwaysBreakAfterReturnType: None
+AlwaysBreakBeforeMultilineStrings: false
+AlwaysBreakTemplateDeclarations: MultiLine
+BinPackArguments: true
+BinPackParameters: true
+BraceWrapping:
+  AfterClass: true
+  AfterControlStatement: false
+  AfterEnum: false
+  AfterFunction: true
+  AfterNamespace: false
+  AfterObjCDeclaration: false
+  AfterStruct: false
+  AfterUnion: false
+  AfterExternBlock: false
+  BeforeCatch: false
+  BeforeElse: false
+  IndentBraces: false
+  SplitEmptyFunction: true
+  SplitEmptyRecord: true
+  SplitEmptyNamespace: true
+BreakBeforeBinaryOperators: None
+BreakBeforeBraces: Custom
+BreakBeforeInheritanceComma: false
+BreakInheritanceList: BeforeColon
+BreakBeforeTernaryOperators: true
+BreakConstructorInitializers: BeforeColon
+BreakAfterJavaFieldAnnotations: false
+BreakStringLiterals: false
+ColumnLimit: 0
+CommentPragmas: '^ IWYU pragma:'
+CompactNamespaces: false
+ConstructorInitializerAllOnOneLineOrOnePerLine: false
+ConstructorInitializerIndentWidth: 8
+ContinuationIndentWidth: 8
+Cpp11BracedListStyle: false
+DerivePointerAlignment: false
+DisableFormat: false
+ExperimentalAutoDetectBinPacking: false
+FixNamespaceComments: true
+ForEachMacros:
+  - 'udev_list_entry_foreach'
+IncludeBlocks: Regroup
+IncludeCategories:
+  # Headers matching the name of the component are matched automatically.
+  # Priority 1
+  # Other library headers (explicit overrides to match before system headers)
+  - Regex: '(<jpeglib.h>|<libudev.h>|<tiffio.h>|<xf86drm.h>|<xf86drmMode.h>|<yaml.h>)'
+    Priority: 9
+  # Qt includes (match before C++ standard library)
+  - Regex: '<Q([A-Za-z0-9\-_])+>'
+    CaseSensitive: true
+    Priority: 9
+  # Headers in <> with an extension. (+system libraries)
+  - Regex: '<([A-Za-z0-9\-_])+\.h>'
+    Priority: 2
+  # System headers
+  - Regex: '<sys/.*>'
+    Priority: 2
+  # C++ standard library includes (no extension)
+  - Regex: '<([A-Za-z0-9\-_/])+>'
+    Priority: 2
+  # Linux headers, as a second group/subset of system headers
+  - Regex: '<linux/.*>'
+    Priority: 3
+  # Headers for libcamera Base support
+  - Regex: '<libcamera/base/private.h>'
+    Priority: 4
+  - Regex: '<libcamera/base/.*\.h>'
+    Priority: 5
+  # Public API Headers for libcamera, which are not in a subdir (i.e. ipa/,internal/)
+  - Regex: '<libcamera/.*\.h>'
+    Priority: 6
+  # IPA Interfaces
+  - Regex: '<libcamera/ipa/.*\.h>'
+    Priority: 7
+  # libcamera Internal headers in ""
+  - Regex: '"libcamera/internal/.*\.h"'
+    Priority: 8
+  # Other libraries headers with one group per library (.h or .hpp)
+  - Regex: '<.*/.*\.hp*>'
+    Priority: 9
+  # local modular includes "path/file.h" (.h or .hpp)
+  - Regex: '"(.*/)+.*\.hp*"'
+    Priority: 10
+  # Other local headers "file.h" with extension (.h or .hpp)
+  - Regex: '".*.hp*"'
+    Priority: 11
+  # Any unmatched line, separated from the last group
+  - Regex: '"*"'
+    Priority: 100
+
+IncludeIsMainRegex: '(_test)?$'
+IndentCaseLabels: false
+IndentPPDirectives: None
+IndentWidth: 8
+IndentWrappedFunctionNames: false
+JavaScriptQuotes: Leave
+JavaScriptWrapImports: true
+KeepEmptyLinesAtTheStartOfBlocks: false
+MacroBlockBegin: ''
+MacroBlockEnd: ''
+MaxEmptyLinesToKeep: 1
+NamespaceIndentation: None
+ObjCBinPackProtocolList: Auto
+ObjCBlockIndentWidth: 8
+ObjCSpaceAfterProperty: true
+ObjCSpaceBeforeProtocolList: true
+
+# Taken from git's rules
+PenaltyBreakAssignment: 10
+PenaltyBreakBeforeFirstCallParameter: 30
+PenaltyBreakComment: 10
+PenaltyBreakFirstLessLess: 0
+PenaltyBreakString: 10
+PenaltyBreakTemplateDeclaration: 10
+PenaltyExcessCharacter: 100
+PenaltyReturnTypeOnItsOwnLine: 60
+
+PointerAlignment: Right
+ReflowComments: false
+SortIncludes: true
+SortUsingDeclarations: true
+SpaceAfterCStyleCast: false
+SpaceAfterTemplateKeyword: false
+SpaceBeforeAssignmentOperators: true
+SpaceBeforeCpp11BracedList: false
+SpaceBeforeCtorInitializerColon: true
+SpaceBeforeInheritanceColon: true
+SpaceBeforeParens: ControlStatements
+SpaceBeforeRangeBasedForLoopColon: true
+SpaceInEmptyParentheses: false
+SpacesBeforeTrailingComments: 1
+SpacesInAngles: false
+SpacesInContainerLiterals: false
+SpacesInCStyleCastParentheses: false
+SpacesInParentheses: false
+SpacesInSquareBrackets: false
+Standard: Cpp11
+TabWidth: 8
+UseTab: Always
+...
diff --git a/spider-cam/libcamera/.clang-tidy b/spider-cam/libcamera/.clang-tidy
new file mode 100644
index 0000000..8056d7a
--- /dev/null
+++ b/spider-cam/libcamera/.clang-tidy
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: CC0-1.0
+
+Checks: -clang-diagnostic-c99-designator
+FormatStyle: file
diff --git a/spider-cam/libcamera/.gitignore b/spider-cam/libcamera/.gitignore
new file mode 100644
index 0000000..f6d1d68
--- /dev/null
+++ b/spider-cam/libcamera/.gitignore
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: CC0-1.0
+
+/build/
+/patches/
+
+*.patch
+*.pyc
+__pycache__/
diff --git a/spider-cam/libcamera/.reuse/dep5 b/spider-cam/libcamera/.reuse/dep5
new file mode 100644
index 0000000..c5ef5e0
--- /dev/null
+++ b/spider-cam/libcamera/.reuse/dep5
@@ -0,0 +1,31 @@
+Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: libcamera
+Upstream-Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Source: https://git.libcamera.org/libcamera/libcamera.git/
+
+Files: Documentation/binning.svg
+ Documentation/camera-sensor-model.rst
+ Documentation/sensor_model.svg
+Copyright: Copyright 2023 Ideas On Board Oy
+License: CC-BY-SA-4.0
+
+Files: Documentation/theme/static/search.png
+Copyright: 2022 Fonticons, Inc.
+License: CC-BY-4.0
+
+Files: src/ipa/rpi/vc4/data/*.json
+ utils/raspberrypi/ctt/ctt_config_example.json
+ utils/raspberrypi/ctt/ctt_ref.pgm
+Copyright: 2019-2020 Raspberry Pi Ltd
+License: BSD-2-Clause
+
+Files: src/qcam/assets/feathericons/*.svg
+Copyright: 2019 Cole Bemis (and other Feather icons contributors)
+License: MIT
+Comment: https://feathericons.com/
+
+Files: utils/ipc/mojo
+ utils/ipc/tools
+Copyright: Copyright 2013-2020 The Chromium Authors. All rights reserved.
+License: BSD-3-Clause
+Source: https://chromium.googlesource.com/chromium/src.git/
diff --git a/spider-cam/libcamera/COPYING.rst b/spider-cam/libcamera/COPYING.rst
new file mode 100644
index 0000000..479a553
--- /dev/null
+++ b/spider-cam/libcamera/COPYING.rst
@@ -0,0 +1,71 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+==========
+ Licenses
+==========
+
+TL;DR summary: The libcamera core is covered by the LGPL-2.1-or-later license.
+IPA modules included in libcamera are covered by a free software license.
+Third-parties may develop IPA modules outside of libcamera and distribute them
+under a closed-source license, provided they do not include source code from
+the libcamera project.
+
+The libcamera project contains multiple libraries, applications and utilities.
+Licenses are expressed through SPDX tags in text-based files that support
+comments, and through the .reuse/dep5 file otherwise. A copy of all licenses is
+stored in the LICENSES directory.
+
+The following text summarizes the licenses covering the different components of
+the project to offer a quick overview for developers. The SPDX and DEP5
+information are however authoritative and shall prevail in case of
+inconsistencies with the text below.
+
+The libcamera core source code, located under the include/libcamera/ and
+src/libcamera/ directories, is fully covered by the LGPL-2.1-or-later license,
+which thus covers distribution of the libcamera.so binary. Other files located
+in those directories, most notably the meson build files, and various related
+build scripts, may be covered by different licenses. None of their source code
+is incorporated in the libcamera.so binary, they thus don't affect the
+distribution terms of the binary.
+
+The IPA modules, located in src/ipa/, are covered by free software licenses
+chosen by the module authors. The LGPL-2.1-or-later license is recommended.
+Those modules are compiled as separate binaries and dynamically loaded by the
+libcamera core at runtime.
+
+The IPA module API is defined in headers located in include/libcamera/ipa/ and
+covered by the LGPL-2.1-or-later license. Using the data types (including
+classes, structures and enumerations) and macros defined in the IPA module and
+libcamera core API headers in IPA modules doesn't extend the LGPL license to
+the IPA modules. Third-party closed-source IPA modules are thus permitted,
+provided they comply with the licensing requirements of any software they
+include or link to.
+
+The libcamera Android camera HAL component is located in src/android/. The
+libcamera-specific source code is covered by the LGPL-2.1-or-later license. The
+component additionally contains header files and source code, located
+respectively in include/android/ and src/android/metadata/, copied verbatim
+from Android and covered by the Apache-2.0 license.
+
+The libcamera GStreamer and V4L2 adaptation source code, located respectively
+in src/gstreamer/ and src/v4l2/, is fully covered by the LGPL-2.1-or-later
+license.
+Those components are compiled to separate binaries and do not
+influence the license of the libcamera core.
+
+The cam and qcam sample applications, as well as the unit tests, located
+respectively in src/cam/, src/qcam/ and test/, are covered by the
+GPL-2.0-or-later license. qcam additionally includes an icon set covered by the
+MIT license. Those applications are compiled to separate binaries and do not
+influence the license of the libcamera core.
+
+Additional utilities are located in the utils/ directory and are covered by
+various licenses. They are not part of the libcamera core and do not influence
+its license.
+
+Finally, copies of various Linux kernel headers are included in include/linux/
+to avoid depending on particular versions of those headers being installed in
+the system. The Linux kernel headers are covered by their respective license,
+including the Linux kernel license syscall exception. Using a copy of those
+headers doesn't affect libcamera licensing terms in any way compared to using
+the same headers installed in the system from kernel headers packages provided
+by Linux distributions.
diff --git a/spider-cam/libcamera/Documentation/Doxyfile.in b/spider-cam/libcamera/Documentation/Doxyfile.in
new file mode 100644
index 0000000..abafcf6
--- /dev/null
+++ b/spider-cam/libcamera/Documentation/Doxyfile.in
@@ -0,0 +1,90 @@
+# SPDX-License-Identifier: CC-BY-SA-4.0
+# Doxyfile 1.9.5
+
+PROJECT_NAME = "libcamera"
+PROJECT_NUMBER = "@VERSION@"
+PROJECT_BRIEF = "Supporting cameras in Linux since 2019"
+
+OUTPUT_DIRECTORY = "@OUTPUT_DIR@"
+
+STRIP_FROM_PATH = "@TOP_SRCDIR@"
+
+ALIASES = "context=\xrefitem context \"Thread Safety\" \"Thread Safety\"" \
+          "threadbound=\ref thread-bound \"thread-bound\"" \
+          "threadsafe=\ref thread-safe \"thread-safe\""
+
+EXTENSION_MAPPING = h=C++
+
+TOC_INCLUDE_HEADINGS = 0
+
+CASE_SENSE_NAMES = YES
+
+QUIET = YES
+WARN_AS_ERROR = @WARN_AS_ERROR@
+
+INPUT = "@TOP_SRCDIR@/include/libcamera" \
+        "@TOP_SRCDIR@/src/ipa/ipu3" \
+        "@TOP_SRCDIR@/src/ipa/libipa" \
+        "@TOP_SRCDIR@/src/libcamera" \
+        "@TOP_BUILDDIR@/include/libcamera" \
+        "@TOP_BUILDDIR@/src/libcamera"
+
+FILE_PATTERNS = *.c \
+                *.cpp \
+                *.h
+
+RECURSIVE = YES
+
+EXCLUDE = @TOP_SRCDIR@/include/libcamera/base/span.h \
+          @TOP_SRCDIR@/include/libcamera/internal/device_enumerator_sysfs.h \
+          @TOP_SRCDIR@/include/libcamera/internal/device_enumerator_udev.h \
+          @TOP_SRCDIR@/include/libcamera/internal/ipc_pipe_unixsocket.h \
+          @TOP_SRCDIR@/src/libcamera/device_enumerator_sysfs.cpp \
+          @TOP_SRCDIR@/src/libcamera/device_enumerator_udev.cpp \
+          @TOP_SRCDIR@/src/libcamera/ipc_pipe_unixsocket.cpp \
+          @TOP_SRCDIR@/src/libcamera/pipeline/ \
+          @TOP_SRCDIR@/src/libcamera/tracepoints.cpp \
+          @TOP_BUILDDIR@/include/libcamera/internal/tracepoints.h \
+          @TOP_BUILDDIR@/include/libcamera/ipa/soft_ipa_interface.h \
+          @TOP_BUILDDIR@/src/libcamera/proxy/
+
+EXCLUDE_PATTERNS = @TOP_BUILDDIR@/include/libcamera/ipa/*_serializer.h \
+                   @TOP_BUILDDIR@/include/libcamera/ipa/*_proxy.h \
+                   @TOP_BUILDDIR@/include/libcamera/ipa/ipu3_*.h \
+                   @TOP_BUILDDIR@/include/libcamera/ipa/raspberrypi_*.h \
+                   @TOP_BUILDDIR@/include/libcamera/ipa/rkisp1_*.h \
+                   @TOP_BUILDDIR@/include/libcamera/ipa/vimc_*.h
+
+EXCLUDE_SYMBOLS = libcamera::BoundMethodArgs \
+                  libcamera::BoundMethodBase \
+                  libcamera::BoundMethodFunctor \
+                  libcamera::BoundMethodMember \
+                  libcamera::BoundMethodPack \
+                  libcamera::BoundMethodPackBase \
+                  libcamera::BoundMethodStatic \
+                  libcamera::CameraManager::Private \
+                  libcamera::SignalBase \
+                  libcamera::ipa::AlgorithmFactoryBase \
+                  *::details \
+                  std::*
+
+EXCLUDE_SYMLINKS = YES
+
+HTML_OUTPUT = api-html
+
+GENERATE_LATEX = NO
+
+MACRO_EXPANSION = YES
+EXPAND_ONLY_PREDEF = YES
+
+INCLUDE_PATH = "@TOP_SRCDIR@/include/libcamera"
+INCLUDE_FILE_PATTERNS = *.h
+
+IMAGE_PATH = "@TOP_SRCDIR@/Documentation/images"
+
+PREDEFINED = __DOXYGEN__ \
+             __cplusplus \
+             __attribute__(x)= \
+             @PREDEFINED@
+
+HAVE_DOT = YES
diff --git a/spider-cam/libcamera/Documentation/api-html/index.rst b/spider-cam/libcamera/Documentation/api-html/index.rst
new file mode 100644
index 0000000..9e630fc
--- /dev/null
+++ b/spider-cam/libcamera/Documentation/api-html/index.rst
@@ -0,0 +1,8 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+.. _api:
+
+API
+===
+
+:: Placeholder for Doxygen documentation
diff --git a/spider-cam/libcamera/Documentation/binning.svg b/spider-cam/libcamera/Documentation/binning.svg
new file mode 100644
index 0000000..c6a3b63
--- /dev/null
+++ b/spider-cam/libcamera/Documentation/binning.svg
@@ -0,0 +1,5053 @@
[5053 lines of SVG markup omitted: the pixel-binning diagram referenced by camera-sensor-model.rst; the XML elements did not survive extraction.]
diff --git a/spider-cam/libcamera/Documentation/camera-sensor-model.rst b/spider-cam/libcamera/Documentation/camera-sensor-model.rst
new file mode 100644
index 0000000..b66c880
--- /dev/null
+++ b/spider-cam/libcamera/Documentation/camera-sensor-model.rst
@@ -0,0 +1,173 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+.. _camera-sensor-model:
+
+.. todo: Move to Doxygen-generated documentation
+
+The libcamera camera sensor model
+=================================
+
+libcamera defines an abstract camera sensor model in order to provide
+a description of each of the processing steps that result in image data being
+sent on the media bus and that form the image stream delivered to applications.
+
+Applications should use the abstract camera sensor model defined here to
+precisely control the operations of the camera sensor.
+
+The libcamera camera sensor model targets image sensors producing frames in
+RAW format, delivered through a MIPI CSI-2 compliant bus implementation.
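+
+.. Editor's aside, not part of the upstream file: a quick numeric sanity
+   check of the frame rate equations given at the end of this document,
+   using invented example values, written in Lua to match the controller
+   code elsewhere in this change set.
+
+      -- pixel_rate = csi_2_link_freq * 2 * nr_of_lanes / bits_per_sample
+      -- (hypothetical 2-lane D-PHY link at 450 MHz, 10-bit RAW samples)
+      local pixel_rate = 450e6 * 2 * 2 / 10    -- 180 Mpixels/s
+      -- frame_rate = pixel_rate / total_frame_size (visible + blanking)
+      print(pixel_rate / (3000 * 2200))         -- ~27.3 frames per second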
+
+The abstract sensor model maps libcamera components to the characteristics and
+operations of an image sensor, and serves as a reference to model the libcamera
+CameraSensor class and SensorConfiguration classes and operations.
+
+In order to control the configuration of the camera sensor through the
+SensorConfiguration class, applications should understand this model and map it
+to the combination of image sensor and kernel driver in use.
+
+The camera sensor model defined here is based on the *MIPI CCS specification*,
+particularly on *Section 8.2 - Image readout* of *Chapter 8 - Video Timings*.
+
+
+Glossary
+--------
+
+.. glossary::
+
+   Pixel array
+      The full grid of pixels, active and inactive ones
+
+   Pixel array active area
+      The portion(s) of the pixel array that contains valid and readable
+      pixels; corresponds to the libcamera properties::PixelArrayActiveAreas
+
+   Analog crop rectangle
+      The portion of the *pixel array active area* which is read out and passed
+      to further processing stages
+
+   Subsampling
+      Pixel processing techniques that reduce the image size by binning or by
+      skipping adjacent pixels
+
+   Digital crop
+      Crop of the sub-sampled image data before scaling
+
+   Frame output
+      The frame (image) as output on the media bus by the camera sensor
+
+Camera sensor model
+-------------------
+
+The abstract sensor model is described in the following diagram.
+
+.. figure:: sensor_model.svg
+
+
+1. The sensor reads pixels from the *pixel array*. The pixels being read out
+   are selected by the *analog crop rectangle*.
+
+2. The pixels can be subsampled to reduce the image size without affecting the
+   field of view. Two subsampling techniques can be used:
+
+   - Binning: combines adjacent pixels of the same colour by averaging or
+     summing their values, in the analog domain and/or the digital domain.
+
+     .. figure:: binning.svg
+
+
+   - Skipping: skips the read out of a number of adjacent pixels.
+
+     .. figure:: skipping.svg
+
+
+3. The output of the optional sub-sampling stage is then cropped after the
+   conversion of the analogue pixel values in the digital domain.
+
+4. The resulting output frame is sent on the media bus by the sensor.
+
+Camera Sensor configuration parameters
+--------------------------------------
+
+The libcamera camera sensor model defines parameters that allow users to
+control:
+
+1. The image format bit depth
+
+2. The size and position of the *Analog crop rectangle*
+
+3. The subsampling factors used to downscale the pixel array readout data to a
+   smaller frame size without reducing the image *field of view*. Two
+   configuration parameters are made available to control the downscaling
+   factor:
+
+   - binning
+     A vertical and horizontal binning factor can be specified, the image
+     will be downscaled in its vertical and horizontal sizes by the specified
+     factor.
+
+     .. code-block:: c
+        :caption: Definition: The horizontal and vertical binning factors
+
+        horizontal_binning = xBin;
+        vertical_binning = yBin;
+
+   - skipping
+     Skipping reduces the image resolution by skipping the read-out of a number
+     of adjacent pixels. The skipping factor is specified by the 'increment'
+     number (number of pixels to 'skip') in the vertical and horizontal
+     directions and for even and odd rows and columns.
+
+     .. code-block:: c
+        :caption: Definition: The horizontal and vertical skipping factors
+
+        horizontal_skipping = (xOddInc + xEvenInc) / 2;
+        vertical_skipping = (yOddInc + yEvenInc) / 2;
+
+   Different sensors perform the binning and skipping stages in different
+   orders. For the sake of computing the final output image size the order of
+   execution is not relevant. The overall down-scaling factor is obtained by
+   combining the binning and skipping factors.
+
+   .. code-block:: c
+      :caption: Definition: The total scaling factor (binning + sub-sampling)
+
+      total_horizontal_downscale = horizontal_binning + horizontal_skipping;
+      total_vertical_downscale = vertical_binning + vertical_skipping;
+
+
+4. The output size is used to specify any additional cropping on the
+   sub-sampled frame.
+
+5. The total line length and frame height (*visible* pixels + *blankings*) as
+   sent on the MIPI CSI-2 bus.
+
+6. The pixel transmission rate on the MIPI CSI-2 bus.
+
+The above parameters are combined to obtain the following high-level
+configurations:
+
+- **frame output size**
+
+  Obtained by applying a crop to the physical pixel array size in the analog
+  domain, followed by optional binning and sub-sampling (in any order),
+  followed by an optional crop step in the output digital domain.
+
+- **frame rate**
+
+  The combination of the *total frame size*, the image format *bit depth* and
+  the *pixel rate* of the data sent on the MIPI CSI-2 bus makes it possible to
+  compute the image stream frame rate. The equation is the well-known:
+
+  .. code-block:: c
+
+     frame_duration = total_frame_size / pixel_rate;
+     frame_rate = 1 / frame_duration;
+
+
+  where the *pixel_rate* parameter is the result of the sensor's configuration
+  of the MIPI CSI-2 bus *(the following formula applies to MIPI CSI-2 when
+  used on MIPI D-PHY physical protocol layer only)*
+
+  .. code-block:: c
+
+     pixel_rate = csi_2_link_freq * 2 * nr_of_lanes / bits_per_sample;
diff --git a/spider-cam/libcamera/Documentation/code-of-conduct.rst b/spider-cam/libcamera/Documentation/code-of-conduct.rst
new file mode 100644
index 0000000..38b7d7a
--- /dev/null
+++ b/spider-cam/libcamera/Documentation/code-of-conduct.rst
@@ -0,0 +1,94 @@
+.. SPDX-License-Identifier: CC-BY-4.0
+
+.. _code-of-conduct:
+
+Contributor Covenant Code of Conduct
+====================================
+
+Our Pledge
+----------
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to make participation in our project and
+our community a harassment-free experience for everyone, regardless of age,
+body size, disability, ethnicity, sex characteristics, gender identity and
+expression, level of experience, education, socio-economic status, nationality,
+personal appearance, race, religion, or sexual identity and orientation.
+
+Our Standards
+-------------
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+  advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+  address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+  professional setting
+
+Our Responsibilities
+--------------------
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+Scope
+-----
+
+This Code of Conduct applies within all project spaces, and it also applies
+when an individual is representing the project or its community in public
+spaces. Examples of representing a project or community include using an
+official project e-mail address, posting via an official social media account,
+or acting as an appointed representative at an online or offline event.
+Representation of a project may be further defined and clarified by project
+maintainers.
+
+Enforcement
+-----------
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at conduct@libcamera.org, or directly
+to any member of the code of conduct team:
+
+* Kieran Bingham <kieran.bingham@ideasonboard.com>
+* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+
+All complaints will be reviewed and investigated and will result in a response
+that is deemed necessary and appropriate to the circumstances. The project team
+is obligated to maintain confidentiality with regard to the reporter of an
+incident. Further details of specific enforcement policies may be posted
+separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+Attribution
+-----------
+
+This Code of Conduct is adapted from the `Contributor Covenant`_, version 1.4,
+available at
+https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
+
+.. _Contributor Covenant: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see
+https://www.contributor-covenant.org/faq
+
diff --git a/spider-cam/libcamera/Documentation/coding-style.rst b/spider-cam/libcamera/Documentation/coding-style.rst
new file mode 100644
index 0000000..72cb28d
--- /dev/null
+++ b/spider-cam/libcamera/Documentation/coding-style.rst
@@ -0,0 +1,429 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+.. _coding-style-guidelines:
+
+Coding Style Guidelines
+=======================
+
+These coding guidelines are meant to ensure code quality.
+As a contributor you are expected to follow them in all code submitted to the
+project. While strict compliance is desired, exceptions are tolerated when
+justified with good reasons. Please read the whole coding guidelines and use
+common sense to decide when departing from them is appropriate.
+
+libcamera is written in C++, a language that has seen many revisions and
+offers an extensive set of features that are easy to abuse. These coding
+guidelines establish the subset of C++ used by the project.
+
+
+Coding Style
+------------
+
+Even if the programming language in use is different, the project embraces the
+`Linux Kernel Coding Style`_ with a few exceptions and some C++ specificities.
+
+.. _Linux Kernel Coding Style: https://www.kernel.org/doc/html/latest/process/coding-style.html
+
+In particular, from the kernel style document, the following sections are
+adopted:
+
+* 1 "Indentation"
+* 2 "Breaking Long Lines" striving to fit code within 80 columns and
+  accepting up to 120 columns when necessary
+* 3 "Placing Braces and Spaces"
+* 3.1 "Spaces"
+* 8 "Commenting" with the exception that in-function comments are not
+  always unwelcome.
+
+While libcamera uses the kernel coding style for all typographic matters, the
+project is a user space library, developed in a different programming language,
+and the kernel guidelines fall short for this use case.
+
+For this reason, rules and guidelines from the `Google C++ Style Guide`_ have
+been adopted as well as most coding principles specified therein, with a
+few exceptions and relaxed limitations on some subjects.
+
+.. _Google C++ Style Guide: https://google.github.io/styleguide/cppguide.html
+
+The following exceptions apply to the naming conventions specified in the
+document:
+
+* File names: libcamera uses the .cpp extension for C++ source files and
+  the .h extension for header files
+* Variables, function parameters, function names and class members use
+  camel case style, with the first letter in lower-case (as in 'camelCase'
+  and not 'CamelCase')
+* Types (classes, structs, type aliases, and type template parameters) use
+  camel case, with the first letter in capital case (as in 'CamelCase' and
+  not 'camelCase')
+* Enum members use 'CamelCase', while macros are in capital case with
+  underscores in between
+* All formatting rules specified in the selected sections of the Linux kernel
+  Code Style for indentation, braces, spacing, etc
+* Headers are guarded by the use of '#pragma once'
+
+Order of Includes
+~~~~~~~~~~~~~~~~~
+
+Headers shall be included at the beginning of .c, .cpp and .h files, right
+after the file description comment block and, for .h files, the header guard
+macro. For .cpp files, if the file implements an API declared in a header file,
+that header file shall be included first in order to ensure it is
+self-contained.
+
+While the following list is extensive, it documents the expected behaviour
+defined by the clang-format configuration and tooling should assist with
+ordering.
+
+The headers shall be grouped and ordered as follows:
+
+1. The header declaring the API being implemented (if any)
+2. The C and C++ system and standard library headers
+3. Linux kernel headers
+4. The libcamera base private header if required
+5. The libcamera base library headers
+6. The libcamera public API headers
+7. The libcamera IPA interfaces
+8. The internal libcamera headers
+9. Other libraries' headers, with one group per library
+10. Local headers grouped by subdirectory
+11. Any local headers
+
+Groups of headers shall be separated by a single blank line. Headers within
+each group shall be sorted alphabetically.
+
+System and library headers shall be included with angle brackets. Project
+headers shall be included with angle brackets for the libcamera public API
+headers, and with double quotes for internal libcamera headers.
+
+
+C++ Specific Rules
+------------------
+
+The code shall be implemented in C++17, with the following caveats:
+
+* Type inference (auto and decltype) shall be used with caution, to avoid
+  drifting towards an untyped language.
+* The explicit, override and final specifiers are to be used where applicable.
+* Smart pointers, as well as shared pointers and weak pointers, shall not be
+  overused.
+* Classes are encouraged to define move constructors and assignment operators
+  where applicable, and generally make use of the features offered by rvalue
+  references.
+
+Object Ownership
+~~~~~~~~~~~~~~~~
+
+libcamera creates and destroys many objects at runtime, for both objects
+internal to the library and objects exposed to the user. To guarantee proper
+operation without use after free, double free or memory leaks, knowing who owns
+each object at any time is crucial. The project has enacted a set of rules to
+make object ownership tracking as explicit and fool-proof as possible.
+
+In the context of this section, the terms object and instance are used
+interchangeably and both refer to an instance of a class. The term reference
+refers to both C++ references and C++ pointers in their capacity to refer to an
+object. Passing a reference means offering a way to a callee to obtain a
+reference to an object that the caller has a valid reference to. Borrowing a
+reference means using a reference passed by a caller without ownership transfer
+based on the assumption that the caller guarantees the validity of the
+reference for the duration of the operation that borrows it.
+
+1. Single Owner Objects
+
+   * By default an object has a single owner at any time.
+   * Storage of single owner objects varies depending on how the object
+     ownership will evolve through the lifetime of the object.
+
+     * Objects whose ownership needs to be transferred shall be stored as
+       std::unique_ptr<> as much as possible to emphasize the single ownership.
+     * Objects whose owner doesn't change may be embedded in other objects, or
+       stored as pointer or references. They may be stored as std::unique_ptr<>
+       for automatic deletion if desired.
+
+   * Ownership is transferred by passing the reference as a std::unique_ptr<>
+     and using std::move(). After ownership transfer the former owner has no
+     valid reference to the object anymore and shall not access it without
+     first obtaining a valid reference.
+   * Objects may be borrowed by passing an object reference from the owner to
+     the borrower, providing that
+
+     * the owner guarantees the validity of the reference for the whole
+       duration of the borrowing, and
+     * the borrower doesn't access the reference after the end of the
+       borrowing.
+
+     When borrowing from caller to callee for the duration of a function call,
+     this implies that the callee shall not keep any stored reference after it
+     returns. These rules apply to the callee and all the functions it calls,
+     directly or indirectly.
+
+     When the object is stored in a std::unique_ptr<>, borrowing passes a
+     reference to the object, not to the std::unique_ptr<>, as
+
+     * a 'const &' when the object doesn't need to be modified and may not be
+       null.
+     * a pointer when the object may be modified or may be null. Unless
+       otherwise specified, pointers passed to functions are considered as
+       borrowed references valid for the duration of the function only.
+
+2. Shared Objects
+
+   * Objects that may have multiple owners at a given time are called shared
+     objects. They are reference-counted and live as long as any references to
+     the object exist.
+   * Shared objects are created with std::make_shared<> or
+     std::allocate_shared<> and stored in an std::shared_ptr<>.
+   * Ownership is shared by creating and passing copies of any valid
+     std::shared_ptr<>. Ownership is released by destroying the corresponding
+     std::shared_ptr<>.
+   * When passed to a function, std::shared_ptr<> are always passed by value,
+     never by reference. The caller can decide whether to transfer its
+     ownership of the std::shared_ptr<> with std::move() or retain it. The
+     callee shall use std::move() if it needs to store the shared pointer.
+   * Do not over-use std::move(), as it may prevent copy-elision. In particular
+     a function returning a std::shared_ptr<> value shall not use std::move()
+     in its return statements, and its callers shall not wrap the function call
+     with std::move().
+   * Borrowed references to shared objects are passed as references to the
+     objects themselves, not to the std::shared_ptr<>, with the same rules as
+     for single owner objects.
+
+These rules match the `object ownership rules from the Chromium C++ Style Guide`_.
+
+.. _object ownership rules from the Chromium C++ Style Guide: https://chromium.googlesource.com/chromium/src/+/master/styleguide/c++/c++.md#object-ownership-and-calling-conventions
+
+.. attention:: Long term borrowing of single owner objects is allowed. Example
+   use cases are implementation of the singleton pattern (where the singleton
+   guarantees the validity of the reference forever), or returning references
+   to global objects whose lifetime matches the lifetime of the application. As
+   long term borrowing isn't marked through language constructs, it shall be
+   documented explicitly in detail in the API.
+
+Global Variables
+~~~~~~~~~~~~~~~~
+
+The order of initializations and destructions of global variables cannot be
+reasonably controlled. This can cause problems (including segfaults) when
+global variables depend on each other, directly or indirectly. For example, if
+the declaration of a global variable calls a constructor which uses another
+global variable that hasn't been initialized yet, incorrect behavior is likely.
+Similar issues may occur when the library is unloaded and global variables are
+destroyed.
+
+Global variables that are statically initialized and have trivial destructors
+(such as an integer constant) do not cause any issue. Other global variables
+shall be avoided when possible, but are allowed when required (for instance to
+implement factories with auto-registration). They shall not depend on any other
+global variable, should run a minimal amount of code in the constructor and
+destructor, and code that contains dependencies should be moved to a later
+point in time.
+
+Error Handling
+~~~~~~~~~~~~~~
+
+Proper error handling is crucial to the stability of libcamera. The project
+follows a set of high-level rules:
+
+* Make errors impossible through API design. The best way to handle errors is
+  to prevent them from happening in the first place. The preferred option is
+  thus to prevent error conditions at the API design stage when possible.
+* Detect errors at compile time.
+  Compile-time checking of errors not only
+  reduces the runtime complexity, but also ensures that errors are caught early
+  on during development instead of during testing or, worse, in production. The
+  static_assert() declaration should be used where possible for this purpose.
+* Validate all external API contracts. Explicit pre-condition checks shall be
+  used to validate API contracts. Whenever possible, appropriate errors should
+  be returned directly. As libcamera doesn't use exceptions, errors detected in
+  constructors shall result in the constructed object being marked as invalid,
+  with a public member function available to check validity. The checks should
+  be thorough for the public API, and may be lighter for internal APIs when
+  pre-conditions can reasonably be considered to be met through other means.
+* Use assertions for fatal issues only. The ASSERT() macro causes a program
+  abort when compiled in debug mode, and is a no-op otherwise. It is useful to
+  abort execution synchronously with the error check instead of letting the
+  error cause problems (such as segmentation faults) later, and to provide a
+  detailed backtrace. Assertions shall only be used to catch conditions that
+  are never supposed to happen without a serious bug in libcamera that would
+  prevent safe recovery. They shall never be used to validate API contracts.
+  The assertion conditions shall not cause any side effect as they are
+  compiled out in non-debug mode.
+
+C Compatibility Headers
+~~~~~~~~~~~~~~~~~~~~~~~
+
+The C++ standard defines a set of C++ standard library headers, and for some of
+them, defines C compatibility headers. The former have a name of the form
+<cxxx> while the latter are named <xxx.h>. The C++ headers declare names in the
+std namespace, and may declare the same names in the global namespace. The C
+compatibility headers declare names in the global namespace, and may declare
+the same names in the std namespace. Code shall not rely on the optional
+declaration of names in the global or std namespace.
+
+Usage of the C compatibility headers is preferred, except for the math.h
+header. Where math.h defines separate functions for different argument types
+(e.g. abs(int), labs(long int), fabs(double) and fabsf(float)) and requires the
+developer to pick the right function, cmath defines overloaded functions
+(std::abs(int), std::abs(long int), std::abs(double) and std::abs(float)) to
+let the compiler select the right function. This avoids potential errors such
+as calling abs(int) with a float argument, performing an unwanted implicit
+integer conversion. For this reason, cmath is preferred over math.h.
+
+
+Documentation
+-------------
+
+All public and protected classes, structures, enumerations, macros, functions
+and variables shall be documented with a Doxygen comment block, using the
+Javadoc style with C-style comments. When documenting private member functions
+and variables the same Doxygen style shall be used as for public and protected
+members.
+
+Documentation relates to header files, but shall be stored in the .cpp source
+files in order to group the implementation and documentation. Every documented
+header file shall have a \file documentation block in the .cpp source file.
+
+The following comment block shows an example of correct documentation for a
+member function of the PipelineHandler class.
+
+::
+
+   /**
+    * \fn PipelineHandler::start()
+    * \brief Start capturing from a group of streams
+    * \param[in] camera The camera to start
+    *
+    * Start the group of streams that have been configured for capture by
+    * \a configureStreams(). The intended caller of this function is the Camera
+    * class which will in turn be called from the application to indicate that
+    * it has configured the streams and is ready to capture.
+    *
+    * \return 0 on success or a negative error code otherwise
+    */
+
+The comment block shall be placed right before the function it documents. If
+the function is defined inline in the class definition in the header file, the
+comment block shall be placed alone in the .cpp source file in the same order
+as the function definitions in the header file and shall start with an \fn
+line. Otherwise no \fn line shall be present.
+
+The \brief directive shall be present. If the function takes parameters, \param
+directives shall be present, with the appropriate [in], [out] or [inout]
+specifiers. Only when the direction of the parameters isn't known (for instance
+when defining a template function with variadic arguments) the direction
+specifier shall be omitted. The \return directive shall be present when the
+function returns a value, and shall be omitted otherwise.
+
+The long description is optional. When present it shall be surrounded by empty
+lines and may span multiple paragraphs. No blank lines shall otherwise be added
+between the \fn, \brief, \param and \return directives.
+
+
+Tools
+-----
+
+The 'clang-format' code formatting tool can be used to reformat source files
+with the libcamera coding style, defined in the .clang-format file at the root
+of the source tree.
+
+As clang-format is a code formatter, it operates on full files and outputs
+reformatted source code. While it can be used to reformat code before sending
+patches, it may generate unrelated changes. To avoid this, libcamera provides a
+'checkstyle.py' script wrapping the formatting tools to only retain related
+changes. This should be used to validate modifications before submitting them
+for review.
+
+The script operates on one or multiple git commits specified on the command
+line. It does not modify the git tree, the index or the working directory and
+is thus safe to run at any point.
+
+Commits are specified using the same revision range syntax as 'git log'. The
+most usual use cases are to specify a single commit by sha1, branch name or tag
+name, or a commit range with the <from>..<to> syntax. When no arguments are
+given, the topmost commit of the current branch is selected.
+
+::
+
+   $ ./utils/checkstyle.py cc7d204b2c51
+   ----------------------------------------------------------------------------------
+   cc7d204b2c51853f7d963d144f5944e209e7ea29 libcamera: Use the logger instead of cout
+   ----------------------------------------------------------------------------------
+   No style issue detected
+
+When operating on a range of commits, style checks are performed on each commit
+from oldest to newest.
+
+::
+
+   $ ../utils/checkstyle.py 3b56ddaa96fb~3..3b56ddaa96fb
+   ----------------------------------------------------------------------------------
+   b4351e1a6b83a9cfbfc331af3753602a02dbe062 libcamera: log: Fix Doxygen documentation
+   ----------------------------------------------------------------------------------
+   No style issue detected
+
+   --------------------------------------------------------------------------------------
+   6ab3ff4501fcfa24db40fcccbce35bdded7cd4bc libcamera: log: Document the LogMessage class
+   --------------------------------------------------------------------------------------
+   No style issue detected
+
+   ---------------------------------------------------------------------------------
+   3b56ddaa96fbccf4eada05d378ddaa1cb6209b57 build: Add 'std=c++11' cpp compiler flag
+   ---------------------------------------------------------------------------------
+   Commit doesn't touch source files, skipping
+
+Commits that do not touch any .c, .cpp or .h files are skipped.
+
+::
+
+   $ ./utils/checkstyle.py edbd2059d8a4
+   ----------------------------------------------------------------------
+   edbd2059d8a4bd759302ada4368fa4055638fd7f libcamera: Add initial logger
+   ----------------------------------------------------------------------
+   --- src/libcamera/include/log.h
+   +++ src/libcamera/include/log.h
+   @@ -21,11 +21,14 @@
+    {
+    public:
+    	LogMessage(const char *fileName, unsigned int line,
+   -		   LogSeverity severity);
+   -	LogMessage(const LogMessage&) = delete;
+   +		   LogSeverity severity);
+   +	LogMessage(const LogMessage &) = delete;
+    	~LogMessage();
+
+   -	std::ostream& stream() { return msgStream; }
+   +	std::ostream &stream()
+   +	{
+   +		return msgStream;
+   +	}
+
+    private:
+    	std::ostringstream msgStream;
+
+   --- src/libcamera/log.cpp
+   +++ src/libcamera/log.cpp
+   @@ -42,7 +42,7 @@
+
+    static const char *log_severity_name(LogSeverity severity)
+    {
+   -	static const char * const names[] = {
+   +	static const char *const names[] = {
+    		"INFO",
+    		"WARN",
+    		" ERR",
+
+   ---
+   2 potential style issues detected, please review
+
+When potential style issues are detected, they are displayed in the form of a
+diff that fixes the issues, on top of the corresponding commit. As the script
+is in early development false positives are expected. The flagged issues should
+be reviewed, but the diff doesn't need to be applied blindly.
+
+Execution of checkstyle.py can be automated through git commit hooks. Examples
+of pre-commit and post-commit hooks are available in `utils/hooks/pre-commit`
+and `utils/hooks/post-commit`. You can install either hook by copying it to
+`.git/hooks/`. The post-commit hook is easier to start with as it will only
+flag potential issues after committing, while the pre-commit hook will abort
+the commit if issues are detected and requires usage of
+`git commit --no-verify` to ignore false positives.
+
+Happy hacking, libcamera awaits your patches!
diff --git a/spider-cam/libcamera/Documentation/conf.py b/spider-cam/libcamera/Documentation/conf.py
new file mode 100644
index 0000000..7eeea7f
--- /dev/null
+++ b/spider-cam/libcamera/Documentation/conf.py
@@ -0,0 +1,172 @@
+# SPDX-License-Identifier: CC-BY-SA-4.0
+# -*- coding: utf-8 -*-
+#
+# Configuration file for the Sphinx documentation builder.
+#
+# This file does only contain a selection of the most common options.
+# For a full list see the documentation:
+# http://www.sphinx-doc.org/en/master/config
+
+# -- Path setup --------------------------------------------------------------
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#
+# import os
+# import sys
+# sys.path.insert(0, os.path.abspath('.'))
+
+
+# -- Project information -----------------------------------------------------
+
+project = 'libcamera'
+copyright = '2018-2019, The libcamera documentation authors'
+author = u'Kieran Bingham, Jacopo Mondi, Laurent Pinchart, Niklas Söderlund'
+
+# Version information is provided by the build environment, through the
+# sphinx command line.
+
+# -- General configuration ---------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#
+# needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = []
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+#
+# source_suffix = ['.rst', '.md']
+source_suffix = '.rst'
+
+# The master toctree document.
+master_doc = 'index'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = 'en'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This pattern also affects html_static_path and html_extra_path.
+exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = None
+
+
+# -- Options for HTML output -------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+#
+html_theme = 'theme'
+html_theme_path = ['.']
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#
+# html_theme_options = {}
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = []
+
+# Custom sidebar templates, must be a dictionary that maps document names
+# to template names.
+#
+# The default sidebars (for documents that don't match any pattern) are
+# defined by theme itself. Builtin themes are using these templates by
+# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
+# 'searchbox.html']``.
+#
+# html_sidebars = {}
+
+
+# -- Options for HTMLHelp output ---------------------------------------------
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'libcameradoc'
+
+
+# -- Options for LaTeX output ------------------------------------------------
+
+latex_elements = {
+    # The paper size ('letterpaper' or 'a4paper').
+    #
+    # 'papersize': 'letterpaper',
+
+    # The font size ('10pt', '11pt' or '12pt').
+    #
+    # 'pointsize': '10pt',
+
+    # Additional stuff for the LaTeX preamble.
+    #
+    # 'preamble': '',
+
+    # Latex figure (float) alignment
+    #
+    # 'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+#  author, documentclass [howto, manual, or own class]).
+latex_documents = [
+    (master_doc, 'libcamera.tex', 'libcamera Documentation',
+     author, 'manual'),
+]
+
+
+# -- Options for manual page output ------------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    (master_doc, 'libcamera', 'libcamera Documentation',
+     [author], 1)
+]
+
+
+# -- Options for Texinfo output ----------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+    (master_doc, 'libcamera', 'libcamera Documentation',
+     author, 'libcamera', 'One line description of project.',
+     'Miscellaneous'),
+]
+
+
+# -- Options for Epub output -------------------------------------------------
+
+# Bibliographic Dublin Core info.
+epub_title = project
+
+# The unique identifier of the text. This can be a ISBN number
+# or the project homepage.
+#
+# epub_identifier = ''
+
+# A unique identification for the text.
+#
+# epub_uid = ''
+
+# A list of files that should not be packed into the epub file.
+epub_exclude_files = ['search.html']
diff --git a/spider-cam/libcamera/Documentation/contributing.rst b/spider-cam/libcamera/Documentation/contributing.rst
new file mode 100644
index 0000000..18b1914
--- /dev/null
+++ b/spider-cam/libcamera/Documentation/contributing.rst
@@ -0,0 +1,142 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+Contributing
+============
+
+libcamera is developed as a free software project and welcomes contributors.
+Whether you would like to help with coding, documentation, testing, proposing
+new features, or just discussing the project with the community, you can join
+our official public communication channels, or simply check out the code.
+
+The project adheres to a :ref:`code of conduct <code-of-conduct>` that
+maintainers, contributors and community members are expected to follow in all
+online and offline communication.
+
+Mailing List
+------------
+
+We use a public mailing list as our main means of communication. You can find
+subscription information and the messages archive on the `libcamera-devel`_
+list information page.
+
+.. _libcamera-devel: https://lists.libcamera.org/listinfo/libcamera-devel
+
+IRC Channel
+-----------
+
+For informal and real time discussions, our IRC channel on irc.oftc.net is
+open to the public. Point your IRC client to #libcamera to say hello, or use
+the `WebChat`_.
+
+.. _WebChat: https://webchat.oftc.net/?channels=libcamera
+
+Source Code
+-----------
+
+libcamera is in early stages of development, and no releases are available
+yet. The source code is available from the project's `git tree`_.
+
+.. code-block:: shell
+
+   $ git clone https://git.libcamera.org/libcamera/libcamera.git
+
+.. _git tree: https://git.libcamera.org/libcamera/libcamera.git/
+
+A mirror is also hosted on `LinuxTV`_.
+
+.. _LinuxTV: https://git.linuxtv.org/libcamera.git/
+
+Issue Tracker
+-------------
+
+Our `issue tracker`_ tracks all bugs, issues and feature requests.
All issues
+are publicly visible, and you can register for an account to create new issues.
+
+.. _issue tracker: https://bugs.libcamera.org/
+
+Documentation
+-------------
+
+Project documentation is created using `Sphinx`_. Source level documentation
+uses `Doxygen`_. Please make sure to document all code during development.
+
+.. _Sphinx: https://www.sphinx-doc.org
+.. _Doxygen: https://www.doxygen.nl
+
+Submitting Patches
+------------------
+
+The libcamera project has high standards of stability, efficiency and
+reliability. To achieve those, the project goes to great lengths to produce
+code that is as easy to read, understand and maintain as possible. This is
+made possible by a set of :ref:`coding-style-guidelines` that all submissions
+are expected to follow.
+
+We also care about the quality of commit messages. A good commit message not
+only describes what a commit does, but why it does so. By conveying clear
+information about the purpose of the commit, it helps speed up reviews.
+Regardless of whether you're new to git or have years of experience,
+https://cbea.ms/git-commit/ is always a good guide to read to improve your
+commit message writing skills.
+
+The patch submission process for libcamera is similar to the Linux kernel's, and
+goes through the `libcamera-devel`_ mailing list. If you have no previous
+experience with ``git-send-email``, or just experience trouble configuring it
+for your e-mail provider, the sourcehut developers have put together a detailed
+guide available at https://git-send-email.io/.
+
+Patches submitted to the libcamera project must be certified as suitable for
+integration into an open source project. As such, libcamera follows the same
+model as utilised by the Linux kernel, and requires the use of 'Signed-off-by:'
+tags in all patches.
+
+By signing your contributions you are certifying your work in accordance with
+the following:
+
+`Developer's Certificate of Origin`_
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+1 Letterman Drive
+Suite D4700
+San Francisco, CA, 94129
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
+
+
+.. _Developer's Certificate of Origin: https://developercertificate.org/
+
+..
toctree:: + :hidden: + + Code of Conduct + Coding Style diff --git a/spider-cam/libcamera/Documentation/docs.rst b/spider-cam/libcamera/Documentation/docs.rst new file mode 100644 index 0000000..a6e8a59 --- /dev/null +++ b/spider-cam/libcamera/Documentation/docs.rst @@ -0,0 +1,400 @@ +.. SPDX-License-Identifier: CC-BY-SA-4.0 + +.. contents:: + :local: + +************* +Documentation +************* + +.. toctree:: + :hidden: + + API + +API +=== + +The libcamera API is extensively documented using Doxygen. The :ref:`API +nightly build ` contains the most up-to-date API documentation, built from +the latest master branch. + +Feature Requirements +==================== + +Device enumeration +------------------ + +The library shall support enumerating all camera devices available in the +system, including both fixed cameras and hotpluggable cameras. It shall +support cameras plugged and unplugged after the initialization of the +library, and shall offer a mechanism to notify applications of camera plug +and unplug. + +The following types of cameras shall be supported: + +* Internal cameras designed for point-and-shoot still image and video + capture usage, either controlled directly by the CPU, or exposed through + an internal USB bus as a UVC device. + +* External UVC cameras designed for video conferencing usage. + +Other types of camera, including analog cameras, depth cameras, thermal +cameras, external digital picture or movie cameras, are out of scope for +this project. + +A hardware device that includes independent camera sensors, such as front +and back sensors in a phone, shall be considered as multiple camera devices +for the purpose of this library. + +Independent Camera Devices +-------------------------- + +When multiple cameras are present in the system and are able to operate +independently from each other, the library shall expose them as multiple +camera devices and support parallel operation without any additional usage +restriction apart from the limitations inherent to the hardware (such as +memory bandwidth, CPU usage or number of CSI-2 receivers for instance). + +Independent processes shall be able to use independent cameras devices +without interfering with each other. A single camera device shall be +usable by a single process at a time. + +Multiple streams support +------------------------ + +The library shall support multiple video streams running in parallel +for each camera device, within the limits imposed by the system. + +Per frame controls +------------------ + +The library shall support controlling capture parameters for each stream +on a per-frame basis, on a best effort basis based on the capabilities of the +hardware and underlying software stack (including kernel drivers and +firmware). It shall apply capture parameters to the frame they target, and +report the value of the parameters that have effectively been used for each +captured frame. + +When a camera device supports multiple streams, the library shall allow both +control of each stream independently, and control of multiple streams +together. Streams that are controlled together shall be synchronized. No +synchronization is required for streams controlled independently. + +Capability Enumeration +---------------------- + +The library shall expose capabilities of each camera device in a way that +allows applications to discover those capabilities dynamically. Applications +shall be allowed to cache capabilities for as long as they are using the +library. 
If capabilities can change at runtime, the library shall offer a +mechanism to notify applications of such changes. Applications shall not +cache capabilities in long term storage between runs. + +Capabilities shall be discovered dynamically at runtime from the device when +possible, and may come, in part or in full, from platform configuration +data. + +Device Profiles +--------------- + +The library may define different camera device profiles, each with a minimum +set of required capabilities. Applications may use those profiles to quickly +determine the level of features exposed by a device without parsing the full +list of capabilities. Camera devices may implement additional capabilities +on top of the minimum required set for the profile they expose. + +3A and Image Enhancement Algorithms +----------------------------------- + +The camera devices shall implement auto exposure, auto gain and auto white +balance. Camera devices that include a focus lens shall implement auto +focus. Additional image enhancement algorithms, such as noise reduction or +video stabilization, may be implemented. + +All algorithms may be implemented in hardware or firmware outside of the +library, or in software in the library. They shall all be controllable by +applications. + +The library shall be architectured to isolate the 3A and image enhancement +algorithms in a component with a documented API, respectively called the 3A +component and the 3A API. The 3A API shall be stable, and shall allow both +open-source and closed-source implementations of the 3A component. + +The library may include statically-linked open-source 3A components, and +shall support dynamically-linked open-source and closed-source 3A +components. + +Closed-source 3A Component Sandboxing +------------------------------------- + +For security purposes, it may be desired to run closed-source 3A components +in a separate process. The 3A API would in such a case be transported over +IPC. The 3A API shall make it possible to use any IPC mechanism that +supports passing file descriptors. + +The library may implement an IPC mechanism, and shall support third-party +platform-specific IPC mechanisms through the implementation of a +platform-specific 3A API wrapper. No modification to the library shall be +needed to use such third-party IPC mechanisms. + +The 3A component shall not directly access any device node on the system. +Such accesses shall instead be performed through the 3A API. The library +shall validate all accesses and restrict them to what is absolutely required +by 3A components. + +V4L2 Compatibility Layer +------------------------ + +The project shall support traditional V4L2 application through an additional +libcamera wrapper library. The wrapper library shall trap all accesses to +camera devices through `LD_PRELOAD`, and route them through libcamera to +emulate a high-level V4L2 camera device. It shall expose camera device +features on a best-effort basis, and aim for the level of features +traditionally available from a UVC camera designed for video conferencing. + +Android Camera HAL v3 Compatibility +----------------------------------- + +The library API shall expose all the features required to implement an +Android Camera HAL v3 on top of libcamera. Some features of the HAL may be +omitted as long as they can be implemented separately in the HAL, such as +JPEG encoding, or YUV reprocessing. 
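+
+To make the Capability Enumeration requirement above concrete, the following
+minimal sketch shows how an application could discover a camera's supported
+controls and their value ranges dynamically through the current public API.
+The output format is illustrative only:
+
+.. code:: cpp
+
+   #include <iostream>
+
+   #include <libcamera/libcamera.h>
+
+   using namespace libcamera;
+
+   int main()
+   {
+       CameraManager cm;
+       cm.start();
+
+       /* Discover capabilities at runtime instead of hardcoding them. */
+       for (const std::shared_ptr<Camera> &camera : cm.cameras()) {
+           std::cout << camera->id() << std::endl;
+
+           /* Supported controls and their valid value ranges. */
+           for (const auto &[id, info] : camera->controls())
+               std::cout << "  " << id->name() << ": "
+                         << info.toString() << std::endl;
+       }
+
+       cm.stop();
+       return 0;
+   }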
+ + +Camera Stack +============ + +:: + + a c / +-------------+ +-------------+ +-------------+ +-------------+ + p a | | Native | | Framework | | Native | | Android | + p t | | V4L2 | | Application | | libcamera | | Camera | + l i | | Application | | (gstreamer) | | Application | | Framework | + i o \ +-------------+ +-------------+ +-------------+ +-------------+ + n ^ ^ ^ ^ + | | | | + l a | | | | + i d v v | v + b a / +-------------+ +-------------+ | +-------------+ + c p | | V4L2 | | Camera | | | Android | + a t | | Compat. | | Framework | | | Camera | + m a | | | | (gstreamer) | | | HAL | + e t \ +-------------+ +-------------+ | +-------------+ + r i ^ ^ | ^ + a o | | | | + n | | | | + / | ,................................................ + | | ! : Language : ! + l f | | ! : Bindings : ! + i r | | ! : (optional) : ! + b a | | \...............................................' + c m | | | | | + a e | | | | | + m w | v v v v + e o | +----------------------------------------------------------------+ + r r | | | + a k | | libcamera | + | | | + \ +----------------------------------------------------------------+ + ^ ^ ^ + Userspace | | | + ------------------------ | ---------------- | ---------------- | --------------- + Kernel | | | + v v v + +-----------+ +-----------+ +-----------+ + | Media | <--> | Video | <--> | V4L2 | + | Device | | Device | | Subdev | + +-----------+ +-----------+ +-----------+ + +The camera stack comprises four software layers. From bottom to top: + +* The kernel drivers control the camera hardware and expose a + low-level interface to userspace through the Linux kernel V4L2 + family of APIs (Media Controller API, V4L2 Video Device API and + V4L2 Subdev API). + +* The libcamera framework is the core part of the stack. It + handles all control of the camera devices in its core component, + libcamera, and exposes a native C++ API to upper layers. Optional + language bindings allow interfacing to libcamera from other + programming languages. + + Those components live in the same source code repository and + all together constitute the libcamera framework. + +* The libcamera adaptation is an umbrella term designating the + components that interface to libcamera in other frameworks. + Notable examples are a V4L2 compatibility layer, a gstreamer + libcamera element, and an Android camera HAL implementation based + on libcamera. + + Those components can live in the libcamera project source code + in separate repositories, or move to their respective project's + repository (for instance the gstreamer libcamera element). + +* The applications and upper level frameworks are based on the + libcamera framework or libcamera adaptation, and are outside of + the scope of the libcamera project. 
+ + +libcamera Architecture +====================== + +:: + + ---------------------------< libcamera Public API >--------------------------- + ^ ^ + | | + v v + +-------------+ +-------------------------------------------------+ + | Camera | | Camera Device | + | Devices | | +---------------------------------------------+ | + | Manager | | | Device-Agnostic | | + +-------------+ | | | | + ^ | | +------------------------+ | + | | | | ~~~~~~~~~~~~~~~~~~~~~ | + | | | | { +---------------+ } | + | | | | } | ////Image//// | { | + | | | | <-> | /Processing// | } | + | | | | } | /Algorithms// | { | + | | | | { +---------------+ } | + | | | | ~~~~~~~~~~~~~~~~~~~~~ | + | | | | ======================== | + | | | | +---------------+ | + | | | | | //Pipeline/// | | + | | | | <-> | ///Handler/// | | + | | | | | ///////////// | | + | | +--------------------+ +---------------+ | + | | Device-Specific | + | +-------------------------------------------------+ + | ^ ^ + | | | + v v v + +--------------------------------------------------------------------+ + | Helpers and Support Classes | + | +-------------+ +-------------+ +-------------+ +-------------+ | + | | MC & V4L2 | | Buffers | | Sandboxing | | Plugins | | + | | Support | | Allocator | | IPC | | Manager | | + | +-------------+ +-------------+ +-------------+ +-------------+ | + | +-------------+ +-------------+ | + | | Pipeline | | ... | | + | | Runner | | | | + | +-------------+ +-------------+ | + +--------------------------------------------------------------------+ + + /// Device-Specific Components + ~~~ Sandboxing + +While offering a unified API towards upper layers, and presenting +itself as a single library, libcamera isn't monolithic. It exposes +multiple components through its public API, is built around a set of +separate helpers internally, uses device-specific components and can +load dynamic plugins. + +Camera Devices Manager + The Camera Devices Manager provides a view of available cameras + in the system. It performs cold enumeration and runtime camera + management, and supports a hotplug notification mechanism in its + public API. + + To avoid the cost associated with cold enumeration of all devices + at application start, and to arbitrate concurrent access to camera + devices, the Camera Devices Manager could later be split to a + separate service, possibly with integration in platform-specific + device management. + +Camera Device + The Camera Device represents a camera device to upper layers. It + exposes full control of the device through the public API, and is + thus the highest level object exposed by libcamera. + + Camera Device instances are created by the Camera Devices + Manager. An optional function to create new instances could be exposed + through the public API to speed up initialization when the upper + layer knows how to directly address camera devices present in the + system. + +Pipeline Handler + The Pipeline Handler manages complex pipelines exposed by the kernel drivers + through the Media Controller and V4L2 APIs. It abstracts pipeline handling to + hide device-specific details to the rest of the library, and implements both + pipeline configuration based on stream configuration, and pipeline runtime + execution and scheduling when needed by the device. + + This component is device-specific and is part of the libcamera code base. As + such it is covered by the same free software license as the rest of libcamera + and needs to be contributed upstream by device vendors. 
The Pipeline Handler + lives in the same process as the rest of the library, and has access to all + helpers and kernel camera-related devices. + +Image Processing Algorithms + Together with the hardware image processing and hardware statistics + collection, the Image Processing Algorithms implement 3A (Auto-Exposure, + Auto-White Balance and Auto-Focus) and other algorithms. They run on the CPU + and interact with the kernel camera devices to control hardware image + processing based on the parameters supplied by upper layers, closing the + control loop of the ISP. + + This component is device-specific and is loaded as an external plugin. It can + be part of the libcamera code base, in which case it is covered by the same + license, or provided externally as an open-source or closed-source component. + + The component is sandboxed and can only interact with libcamera through + internal APIs specifically marked as such. In particular it will have no + direct access to kernel camera devices, and all its accesses to image and + metadata will be mediated by dmabuf instances explicitly passed to the + component. The component must be prepared to run in a process separate from + the main libcamera process, and to have a very restricted view of the system, + including no access to networking APIs and limited access to file systems. + + The sandboxing mechanism isn't defined by libcamera. One example + implementation will be provided as part of the project, and platforms vendors + will be able to provide their own sandboxing mechanism as a plugin. + + libcamera should provide a basic implementation of Image Processing + Algorithms, to serve as a reference for the internal API. Device vendors are + expected to provide a full-fledged implementation compatible with their + Pipeline Handler. One goal of the libcamera project is to create an + environment in which the community will be able to compete with the + closed-source vendor binaries and develop a high quality open source + implementation. + +Helpers and Support Classes + While Pipeline Handlers are device-specific, implementations are expected to + share code due to usage of identical APIs towards the kernel camera drivers + and the Image Processing Algorithms. This includes without limitation handling + of the MC and V4L2 APIs, buffer management through dmabuf, and pipeline + discovery, configuration and scheduling. Such code will be factored out to + helpers when applicable. + + Other parts of libcamera will also benefit from factoring code out to + self-contained support classes, even if such code is present only once in the + code base, in order to keep the source code clean and easy to read. This + should be the case for instance for plugin management. + + +V4L2 Compatibility Layer +------------------------ + +V4L2 compatibility is achieved through a shared library that traps all +accesses to camera devices and routes them to libcamera to emulate high-level +V4L2 camera devices. It is injected in a process address space through +`LD_PRELOAD` and is completely transparent for applications. + +The compatibility layer exposes camera device features on a best-effort basis, +and aims for the level of features traditionally available from a UVC camera +designed for video conferencing. + + +Android Camera HAL +------------------ + +Camera support for Android is achieved through a generic Android +camera HAL implementation on top of libcamera. 
The HAL will implement internally +features required by Android and missing from libcamera, such as JPEG encoding +support. + +The Android camera HAL implementation will initially target the +LIMITED hardware level, with support for the FULL level then being gradually +implemented. diff --git a/spider-cam/libcamera/Documentation/environment_variables.rst b/spider-cam/libcamera/Documentation/environment_variables.rst new file mode 100644 index 0000000..4e9fbb2 --- /dev/null +++ b/spider-cam/libcamera/Documentation/environment_variables.rst @@ -0,0 +1,164 @@ +.. SPDX-License-Identifier: CC-BY-SA-4.0 + +Environment variables +===================== + +The libcamera behaviour can be tuned through environment variables. This +document lists all the available variables and describes their usage. + +List of variables +----------------- + +LIBCAMERA_LOG_FILE + The custom destination for log output. + + Example value: ``/home/{user}/camera_log.log`` + +LIBCAMERA_LOG_LEVELS + Configure the verbosity of log messages for different categories (`more `__). + + Example value: ``*:DEBUG`` + +LIBCAMERA_LOG_NO_COLOR + Disable coloring of log messages (`more `__). + +LIBCAMERA_IPA_CONFIG_PATH + Define custom search locations for IPA configurations (`more `__). + + Example value: ``${HOME}/.libcamera/share/ipa:/opt/libcamera/vendor/share/ipa`` + +LIBCAMERA_IPA_FORCE_ISOLATION + When set to a non-empty string, force process isolation of all IPA modules. + + Example value: ``1`` + +LIBCAMERA_IPA_MODULE_PATH + Define custom search locations for IPA modules (`more `__). + + Example value: ``${HOME}/.libcamera/lib:/opt/libcamera/vendor/lib`` + +LIBCAMERA_PIPELINES_MATCH_LIST + Define an ordered list of pipeline names to be used to match the media + devices in the system. The pipeline handler names used to populate the + variable are the ones passed to the REGISTER_PIPELINE_HANDLER() macro in the + source code. + + Example value: ``rkisp1,simple`` + +LIBCAMERA_RPI_CONFIG_FILE + Define a custom configuration file to use in the Raspberry Pi pipeline handler. + + Example value: ``/usr/local/share/libcamera/pipeline/rpi/vc4/minimal_mem.yaml`` + +Further details +--------------- + +Notes about debugging +~~~~~~~~~~~~~~~~~~~~~ + +The environment variables ``LIBCAMERA_LOG_FILE``, ``LIBCAMERA_LOG_LEVELS`` and +``LIBCAMERA_LOG_NO_COLOR`` are used to modify the default configuration of the +libcamera logger. + +By default, libcamera logs all messages to the standard error (std::cerr). +Messages are colored by default depending on the log level. Coloring can be +disabled by setting the ``LIBCAMERA_LOG_NO_COLOR`` environment variable. + +The default log destination can also be directed to a file by setting the +``LIBCAMERA_LOG_FILE`` environment variable to the log file name. This also +disables coloring. + +Log levels are controlled through the ``LIBCAMERA_LOG_LEVELS`` variable, which +accepts a comma-separated list of 'category:level' pairs. + +The `level `__ part is mandatory and can either be specified by +name or by numerical index associated with each level. + +The optional `category `__ is a string matching the categories +defined by each file in the source base using the logging infrastructure. It +can include a wildcard ('*') character at the end to match multiple categories. + +For more information refer to the `API documentation `__. + +Examples: + +Enable full debug output to a separate file, for every `category `__ +within a local environment: + +.. 
code:: bash
+
+   :~$ LIBCAMERA_LOG_FILE='/tmp/example_log.log' \
+       LIBCAMERA_LOG_LEVELS=0 \
+       cam --list
+
+Enable full debug output for the categories ``Camera`` and ``V4L2`` within a
+global environment:
+
+.. code:: bash
+
+   :~$ export LIBCAMERA_LOG_LEVELS='Camera:DEBUG,V4L2:DEBUG'
+   :~$ cam --list
+
+Log levels
+~~~~~~~~~~
+
+This is the list of available log levels. Note that messages at the chosen
+level and at more severe levels are printed, while messages at more verbose
+levels are discarded.
+
+- DEBUG (0)
+- INFO (1)
+- WARN (2)
+- ERROR (3)
+- FATAL (4)
+
+Example:
+If you choose WARN (2), you will be able to see WARN (2), ERROR (3) and FATAL (4)
+but not DEBUG (0) and INFO (1).
+
+Log categories
+~~~~~~~~~~~~~~
+
+Every category represents a specific area of the libcamera codebase; the
+names can be located within the source code, for example:
+`src/libcamera/camera_manager.cpp `__
+
+.. code:: cpp
+
+   LOG_DEFINE_CATEGORY(Camera)
+
+There are two available macros used to assign a category name to a part of the
+libcamera codebase:
+
+LOG_DEFINE_CATEGORY
+   This macro is required, in order to use the ``LOGC`` macro for a particular
+   category. It can only be used once for each category. If you want to create
+   log messages within multiple compilation units for the same category, utilize
+   the ``LOG_DECLARE_CATEGORY`` macro, in every file except the definition file.
+LOG_DECLARE_CATEGORY
+   Used for sharing an already defined category between multiple separate
+   compilation units.
+
+Both macros have to be used within the libcamera namespace of the C++ source
+code.
+
+IPA configuration
+~~~~~~~~~~~~~~~~~
+
+IPA modules use configuration files to store parameters. The format and
+contents of the configuration files are specific to the IPA module. They usually
+contain tuning parameters for the algorithms, in JSON format.
+
+The ``LIBCAMERA_IPA_CONFIG_PATH`` variable can be used to specify custom
+storage locations to search for those configuration files.
+
+`Examples `__
+
+IPA module
+~~~~~~~~~~
+
+In order to locate the correct IPA module for your hardware, libcamera gathers
+existing IPA modules from multiple locations. The default locations for this
+operation are the installed system path (for example, on Debian:
+``/usr/local/x86_64-pc-linux-gnu/libcamera``) and the build directory.
+With the ``LIBCAMERA_IPA_MODULE_PATH``, you can specify a non-default location
+to search for IPA modules.
diff --git a/spider-cam/libcamera/Documentation/getting-started.rst b/spider-cam/libcamera/Documentation/getting-started.rst
new file mode 100644
index 0000000..987f43f
--- /dev/null
+++ b/spider-cam/libcamera/Documentation/getting-started.rst
@@ -0,0 +1,5 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+.. Getting started information is defined in the project README file.
+.. include:: ../README.rst
+   :start-after: .. section-begin-getting-started
+   :end-before: .. section-end-getting-started
diff --git a/spider-cam/libcamera/Documentation/guides/application-developer.rst b/spider-cam/libcamera/Documentation/guides/application-developer.rst
new file mode 100644
index 0000000..92e2a37
--- /dev/null
+++ b/spider-cam/libcamera/Documentation/guides/application-developer.rst
@@ -0,0 +1,639 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+Using libcamera in a C++ application
+====================================
+
+This tutorial shows how to create a C++ application that uses libcamera to
+interface with a camera on a system, capture frames from it for 3 seconds, and
+write metadata about the frames to standard output.
+
+Application skeleton
+--------------------
+
+Most of the code in this tutorial runs in the ``int main()`` function
+with a separate global function to handle events. The two functions need
+to share data, which is stored in global variables for simplicity. A
+production-ready application would organize the various objects created
+in classes, and the event handler would be a class member function to
+provide context data without requiring global variables.
+
+Use the following code snippets as the initial application skeleton.
+It already lists all the necessary include directives and instructs the
+compiler to use the libcamera namespace, which gives access to the libcamera
+defined names and types without the need to prefix them.
+
+.. code:: cpp
+
+   #include <iomanip>
+   #include <iostream>
+   #include <memory>
+   #include <thread>
+
+   #include <libcamera/libcamera.h>
+
+   using namespace libcamera;
+   using namespace std::chrono_literals;
+
+   int main()
+   {
+       // Code to follow
+
+       return 0;
+   }
+
+Camera Manager
+--------------
+
+Every libcamera-based application needs an instance of a `CameraManager`_ that
+runs for the life of the application. When the Camera Manager starts, it
+enumerates all the cameras detected in the system. Behind the scenes, libcamera
+abstracts and manages the complex pipelines that kernel drivers expose through
+the `Linux Media Controller`_ and `Video for Linux`_ (V4L2) APIs, meaning that
+an application doesn't need to handle device or driver specific details.
+
+.. _CameraManager: https://libcamera.org/api-html/classlibcamera_1_1CameraManager.html
+.. _Linux Media Controller: https://www.kernel.org/doc/html/latest/media/uapi/mediactl/media-controller-intro.html
+.. _Video for Linux: https://www.linuxtv.org/docs.php
+
+Before the ``int main()`` function, create a global shared pointer
+variable for the camera to support the event callback later:
+
+.. code:: cpp
+
+   static std::shared_ptr<Camera> camera;
+
+Create a Camera Manager instance at the beginning of the main function, and then
+start it. An application must only create a single Camera Manager instance.
+
+The CameraManager can be stored in a unique_ptr to automate deleting the
+instance when it is no longer used, but care must be taken to ensure all
+cameras are released explicitly before this happens.
+
+.. code:: cpp
+
+   std::unique_ptr<CameraManager> cm = std::make_unique<CameraManager>();
+   cm->start();
+
+During the application initialization, the Camera Manager is started to
+enumerate all the supported devices and create cameras that the application can
+interact with.
+
+Once the camera manager is started, we can use it to iterate the available
+cameras in the system:
+
+.. code:: cpp
+
+   for (auto const &camera : cm->cameras())
+       std::cout << camera->id() << std::endl;
+
+Printing the camera id lists the machine-readable unique identifiers, so for
+example, the output on a Linux machine with a connected USB webcam is
+``\_SB_.PCI0.XHC_.RHUB.HS08-8:1.0-5986:2115``.
+
+What libcamera considers a camera
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The libcamera library considers any unique source of video frames, which usually
+corresponds to a camera sensor, as a single camera device. Camera devices expose
+streams, which are obtained by processing data from the single image source and
+all share some basic properties such as the frame duration and the image
+exposure time, as they only depend on the image source configuration.
+
+Applications select one or multiple Camera devices they wish to operate on, and
+require frames from at least one of their Streams.
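+
+Beyond the raw identifier, a camera's properties can provide a friendlier
+label. The hypothetical helper below (assuming a libcamera version where
+``ControlList::get()`` returns a ``std::optional``) combines the unique id
+with the sensor model property when it is available:
+
+.. code:: cpp
+
+   /* Hypothetical helper, not part of the tutorial's final program. */
+   static std::string cameraLabel(Camera *camera)
+   {
+       const ControlList &props = camera->properties();
+       std::string label = camera->id();
+
+       /* The model property is optional; fall back to the id alone. */
+       const auto model = props.get(properties::Model);
+       if (model)
+           label += " '" + *model + "'";
+
+       return label;
+   }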
+
+Create and acquire a camera
+---------------------------
+
+This example application uses a single camera (the first enumerated one) that
+the Camera Manager reports as available to applications.
+
+Camera devices are stored by the CameraManager in a list accessible by index, or
+can be retrieved by name through the ``CameraManager::get()`` function. The
+code below retrieves the name of the first available camera and gets the camera
+by name from the Camera Manager, after making sure that at least one camera is
+available.
+
+.. code:: cpp
+
+   auto cameras = cm->cameras();
+   if (cameras.empty()) {
+       std::cout << "No cameras were identified on the system."
+                 << std::endl;
+       cm->stop();
+       return EXIT_FAILURE;
+   }
+
+   std::string cameraId = cameras[0]->id();
+
+   auto camera = cm->get(cameraId);
+   /*
+    * Note that `camera` may not compare equal to `cameras[0]`.
+    * In fact, it might simply be a `nullptr`, as the particular
+    * device might have disappeared (and reappeared) in the meantime.
+    */
+
+Once a camera has been selected, an application needs to acquire an exclusive
+lock to it so no other application can use it.
+
+.. code:: cpp
+
+   camera->acquire();
+
+Configure the camera
+--------------------
+
+Before the application can do anything with the camera, it needs to configure
+the image format and sizes of the streams it wants to capture frames from.
+
+Stream configurations are represented by instances of the
+``StreamConfiguration`` class, which are grouped together in a
+``CameraConfiguration`` object. Before an application can start setting its
+desired configuration, a ``CameraConfiguration`` instance needs to be generated
+from the ``Camera`` device using the ``Camera::generateConfiguration()``
+function.
+
+The libcamera library uses the ``StreamRole`` enumeration to define predefined
+ways an application intends to use a camera. The
+``Camera::generateConfiguration()`` function accepts a list of desired roles and
+generates a ``CameraConfiguration`` with the best stream parameters
+configuration for each of the requested roles. If the camera can handle the
+requested roles, it returns an initialized ``CameraConfiguration``; if it
+can't, it returns a null pointer.
+
+It is possible for applications to generate an empty ``CameraConfiguration``
+instance by not providing any role. The desired configuration will have to be
+filled-in manually and manually validated.
+
+In the example application, create a new configuration variable and use the
+``Camera::generateConfiguration`` function to produce a ``CameraConfiguration``
+for the single ``StreamRole::Viewfinder`` role.
+
+.. code:: cpp
+
+   std::unique_ptr<CameraConfiguration> config = camera->generateConfiguration( { StreamRole::Viewfinder } );
+
+The generated ``CameraConfiguration`` has a ``StreamConfiguration`` instance for
+each ``StreamRole`` the application requested. Each of these has a default size
+and format that the camera assigned, and a list of supported pixel formats and
+sizes.
+
+The code below accesses the first and only ``StreamConfiguration`` item in the
+``CameraConfiguration`` and outputs its parameters to standard output.
+
+..
code:: cpp
+
+   StreamConfiguration &streamConfig = config->at(0);
+   std::cout << "Default viewfinder configuration is: " << streamConfig.toString() << std::endl;
+
+This is expected to output something like:
+
+   ``Default viewfinder configuration is: 1280x720-MJPEG``
+
+Change and validate the configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+With an initialized ``CameraConfiguration``, an application can make changes to
+the parameters it contains. For example, to change the width and height, use the
+following code:
+
+.. code:: cpp
+
+   streamConfig.size.width = 640;
+   streamConfig.size.height = 480;
+
+If an application changes any parameters, it must validate the configuration
+before applying it to the camera using the ``CameraConfiguration::validate()``
+function. If the new values are not supported by the ``Camera`` device, the
+validation process adjusts the parameters to what it considers to be the closest
+supported values.
+
+The ``validate`` function returns a `Status`_ which applications shall check to
+see if the Pipeline Handler adjusted the configuration.
+
+.. _Status: https://libcamera.org/api-html/classlibcamera_1_1CameraConfiguration.html#a64163f21db2fe1ce0a6af5a6f6847744
+
+For example, the code above set the width and height to 640x480, but if the
+camera cannot produce an image that large, it might adjust the configuration to
+the supported size of 320x240 and return ``Adjusted`` as the validation status
+result.
+
+If the configuration to validate cannot be adjusted to a set of supported
+values, the validation procedure fails and returns the ``Invalid`` status.
+
+For this example application, the code below prints the adjusted values to
+standard out.
+
+.. code:: cpp
+
+   config->validate();
+   std::cout << "Validated viewfinder configuration is: " << streamConfig.toString() << std::endl;
+
+For example, the output might be something like:
+
+   ``Validated viewfinder configuration is: 320x240-MJPEG``
+
+A validated ``CameraConfiguration`` can be given to the ``Camera`` device to be
+applied to the system.
+
+.. code:: cpp
+
+   camera->configure(config.get());
+
+If an application doesn't first validate the configuration before calling
+``Camera::configure()``, there's a chance that calling the function can fail, if
+the given configuration would have to be adjusted.
+
+Allocate FrameBuffers
+---------------------
+
+An application needs to reserve the memory that libcamera can write incoming
+frames and data to, and that the application can then read. The libcamera
+library uses ``FrameBuffer`` instances to represent memory buffers allocated in
+memory. An application should reserve enough memory for the frame size the
+streams need based on the configured image sizes and formats.
+
+The libcamera library consumes buffers provided by applications as
+``FrameBuffer`` instances, which makes libcamera a consumer of buffers exported
+by other devices (such as displays or video encoders), or allocated from an
+external allocator (such as ION on Android).
+
+In some situations, applications do not have any means to allocate or get hold
+of suitable buffers, for instance, when no other device is involved, or on Linux
+platforms that lack a centralized allocator. The ``FrameBufferAllocator`` class
+provides a buffer allocator an application can use in these situations.
+
+An application doesn't have to use the default ``FrameBufferAllocator`` that
+libcamera provides.
It can instead allocate memory manually and pass the buffers
+in ``Request``\s (read more about ``Request`` in `the frame capture section
+<#frame-capture>`_ of this guide). The example in this guide covers using the
+``FrameBufferAllocator`` that libcamera provides.
+
+Using the libcamera ``FrameBufferAllocator``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Applications create a ``FrameBufferAllocator`` for a Camera and use it
+to allocate buffers for streams of a ``CameraConfiguration`` with the
+``allocate()`` function.
+
+The list of allocated buffers can be retrieved using the ``Stream`` instance
+as the parameter of the ``FrameBufferAllocator::buffers()`` function.
+
+.. code:: cpp
+
+   FrameBufferAllocator *allocator = new FrameBufferAllocator(camera);
+
+   for (StreamConfiguration &cfg : *config) {
+       int ret = allocator->allocate(cfg.stream());
+       if (ret < 0) {
+           std::cerr << "Can't allocate buffers" << std::endl;
+           return -ENOMEM;
+       }
+
+       size_t allocated = allocator->buffers(cfg.stream()).size();
+       std::cout << "Allocated " << allocated << " buffers for stream" << std::endl;
+   }
+
+Frame Capture
+~~~~~~~~~~~~~
+
+The libcamera library implements a streaming model based on per-frame requests.
+For each frame an application wants to capture, it must queue a request for it to
+the camera. With libcamera, a ``Request`` is at least one ``Stream`` associated
+with a ``FrameBuffer`` representing the memory location where frames have to be
+stored.
+
+First, by using the ``Stream`` instance associated to each
+``StreamConfiguration``, retrieve the list of ``FrameBuffer``\s created for it
+using the frame allocator. Then create a vector of requests to be submitted to
+the camera.
+
+.. code:: cpp
+
+   Stream *stream = streamConfig.stream();
+   const std::vector<std::unique_ptr<FrameBuffer>> &buffers = allocator->buffers(stream);
+   std::vector<std::unique_ptr<Request>> requests;
+
+Proceed to fill the request vector by creating ``Request`` instances from the
+camera device, and associate a buffer for each of them for the ``Stream``.
+
+.. code:: cpp
+
+   for (unsigned int i = 0; i < buffers.size(); ++i) {
+       std::unique_ptr<Request> request = camera->createRequest();
+       if (!request)
+       {
+           std::cerr << "Can't create request" << std::endl;
+           return -ENOMEM;
+       }
+
+       const std::unique_ptr<FrameBuffer> &buffer = buffers[i];
+       int ret = request->addBuffer(stream, buffer.get());
+       if (ret < 0)
+       {
+           std::cerr << "Can't set buffer for request"
+                     << std::endl;
+           return ret;
+       }
+
+       requests.push_back(std::move(request));
+   }
+
+.. TODO: Controls
+
+.. TODO: A request can also have controls or parameters that you can apply to the image.
+
+Event handling and callbacks
+----------------------------
+
+The libcamera library uses the concept of `signals and slots`_ (similar to `Qt
+Signals and Slots`_) to connect events with callbacks to handle them.
+
+.. _signals and slots: https://libcamera.org/api-html/classlibcamera_1_1Signal.html#details
+.. _Qt Signals and Slots: https://doc.qt.io/qt-6/signalsandslots.html
+
+The ``Camera`` device emits two signals that applications can connect to in
+order to execute callbacks on frame completion events.
+
+The ``Camera::bufferCompleted`` signal notifies applications that a buffer with
+image data is available. Receiving notifications about the single buffer
+completion event allows applications to implement partial request completion
+support, and to inspect the buffer content before the request it is part of has
+fully completed.
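+
+As a sketch of what this could look like, the snippet below (illustrative
+only, not part of the tutorial's final program) connects a slot to the
+``Camera::bufferCompleted`` signal, which carries both the request and the
+buffer that has just completed:
+
+.. code:: cpp
+
+   static void bufferComplete(Request *request, FrameBuffer *buffer)
+   {
+       /*
+        * This buffer has completed, but other buffers belonging to
+        * the same request may still be pending.
+        */
+       std::cout << "Buffer done for request " << request->cookie()
+                 << ", frame sequence " << buffer->metadata().sequence
+                 << std::endl;
+   }
+
+   // In main(), next to the requestCompleted connection shown below:
+   camera->bufferCompleted.connect(bufferComplete);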
+
+The ``Camera::requestCompleted`` signal notifies applications that a request
+has completed, which means all the buffers the request contains have now
+completed. Request completion notifications are always emitted in the same order
+as the requests have been queued to the camera.
+
+To receive the signal emission notifications, connect a slot function to the
+signal to handle it in the application code.
+
+.. code:: cpp
+
+   camera->requestCompleted.connect(requestComplete);
+
+For this example application, only the ``Camera::requestCompleted`` signal gets
+handled and the matching ``requestComplete`` slot function outputs information
+about the FrameBuffer to standard output. This callback is typically where an
+application accesses the image data from the camera and does something with it.
+
+Signals operate in the libcamera ``CameraManager`` thread context, so it is
+important not to block the thread for a long time, as this blocks internal
+processing of the camera pipelines, and can affect realtime performance.
+
+Handle request completion events
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Create the ``requestComplete`` function by matching the slot signature:
+
+.. code:: cpp
+
+   static void requestComplete(Request *request)
+   {
+       // Code to follow
+   }
+
+Request completion events can be emitted for requests which have been canceled,
+for example, by unexpected application shutdown. To avoid an application
+processing invalid image data, it's worth checking that the request has
+completed successfully. The list of request completion statuses is available in
+the `Request::Status`_ class enum documentation.
+
+.. _Request::Status: https://www.libcamera.org/api-html/classlibcamera_1_1Request.html#a2209ba8d51af8167b25f6e3e94d5c45b
+
+.. code:: cpp
+
+   if (request->status() == Request::RequestCancelled)
+       return;
+
+If the ``Request`` has completed successfully, applications can access the
+completed buffers using the ``Request::buffers()`` function, which returns a map
+of ``FrameBuffer`` instances associated with the ``Stream`` that produced the
+images.
+
+.. code:: cpp
+
+   const std::map<const Stream *, FrameBuffer *> &buffers = request->buffers();
+
+Iterating through the map allows applications to inspect each completed buffer
+in this request, and access the metadata associated to each frame.
+
+The metadata buffer contains information such as the capture status, a timestamp,
+and the bytes used, as described in the `FrameMetadata`_ documentation.
+
+.. _FrameMetadata: https://libcamera.org/api-html/structlibcamera_1_1FrameMetadata.html
+
+.. code:: cpp
+
+   for (auto bufferPair : buffers) {
+       FrameBuffer *buffer = bufferPair.second;
+       const FrameMetadata &metadata = buffer->metadata();
+   }
+
+For this example application, inside the ``for`` loop from above, we can print
+the Frame sequence number and details of the planes.
+
+.. code:: cpp
+
+   std::cout << " seq: " << std::setw(6) << std::setfill('0') << metadata.sequence << " bytesused: ";
+
+   unsigned int nplane = 0;
+   for (const FrameMetadata::Plane &plane : metadata.planes())
+   {
+       std::cout << plane.bytesused;
+       if (++nplane < metadata.planes().size())
+           std::cout << "/";
+   }
+
+   std::cout << std::endl;
+
+The expected output shows each monotonically increasing frame sequence number
+and the bytes used by planes.
+
+..
code:: text + + seq: 000000 bytesused: 1843200 + seq: 000002 bytesused: 1843200 + seq: 000004 bytesused: 1843200 + seq: 000006 bytesused: 1843200 + seq: 000008 bytesused: 1843200 + seq: 000010 bytesused: 1843200 + seq: 000012 bytesused: 1843200 + seq: 000014 bytesused: 1843200 + seq: 000016 bytesused: 1843200 + seq: 000018 bytesused: 1843200 + seq: 000020 bytesused: 1843200 + seq: 000022 bytesused: 1843200 + seq: 000024 bytesused: 1843200 + seq: 000026 bytesused: 1843200 + seq: 000028 bytesused: 1843200 + seq: 000030 bytesused: 1843200 + seq: 000032 bytesused: 1843200 + seq: 000034 bytesused: 1843200 + seq: 000036 bytesused: 1843200 + seq: 000038 bytesused: 1843200 + seq: 000040 bytesused: 1843200 + seq: 000042 bytesused: 1843200 + +A completed buffer contains of course image data which can be accessed through +the per-plane dma-buf file descriptor transported by the ``FrameBuffer`` +instance. An example of how to write image data to disk is available in the +`FileSink class`_ which is a part of the ``cam`` utility application in the +libcamera repository. + +.. _FileSink class: https://git.libcamera.org/libcamera/libcamera.git/tree/src/cam/file_sink.cpp + +With the handling of this request completed, it is possible to re-use the +request and the associated buffers and re-queue it to the camera +device: + +.. code:: cpp + + request->reuse(Request::ReuseBuffers); + camera->queueRequest(request); + +Request queueing +---------------- + +The ``Camera`` device is now ready to receive frame capture requests and +actually start delivering frames. In order to prepare for that, an application +needs to first start the camera, and queue requests to it for them to be +processed. + +In the main() function, just after having connected the +``Camera::requestCompleted`` signal to the callback handler, start the camera +and queue all the previously created requests. + +.. code:: cpp + + camera->start(); + for (std::unique_ptr &request : requests) + camera->queueRequest(request.get()); + +Event processing +~~~~~~~~~~~~~~~~ + +libcamera creates an internal execution thread at `CameraManager::start()`_ +time to decouple its own event processing from the application's main thread. +Applications are thus free to manage their own execution opportunely, and only +need to respond to events generated by libcamera emitted through signals. + +.. _CameraManager::start(): https://libcamera.org/api-html/classlibcamera_1_1CameraManager.html#a49e322880a2a26013bb0076788b298c5 + +Real-world applications will likely either integrate with the event loop of the +framework they use, or create their own event loop to respond to user events. +For the simple application presented in this example, it is enough to prevent +immediate termination by pausing for 3 seconds. During that time, the libcamera +thread will generate request completion events that the application will handle +in the ``requestComplete()`` slot connected to the ``Camera::requestCompleted`` +signal. + +.. code:: cpp + + std::this_thread::sleep_for(3000ms); + +Clean up and stop the application +--------------------------------- + +The application is now finished with the camera and the resources the camera +uses, so needs to do the following: + +- stop the camera +- free the buffers in the FrameBufferAllocator and delete it +- release the lock on the camera and reset the pointer to it +- stop the camera manager + +.. 
code:: cpp + + camera->stop(); + allocator->free(stream); + delete allocator; + camera->release(); + camera.reset(); + cm->stop(); + + return 0; + +In this instance the CameraManager will automatically be deleted by the +unique_ptr implementation when it goes out of scope. + +Build and run instructions +-------------------------- + +To build the application, we recommend that you use the `Meson build system`_ +which is also the official build system of the libcamera library. + +Make sure both ``meson`` and ``libcamera`` are installed in your system. Please +refer to your distribution documentation to install meson and install the most +recent version of libcamera from the `git repository`_. You would also need to +install the ``pkg-config`` tool to correctly identify the libcamera.so object +install location in the system. + +.. _Meson build system: https://mesonbuild.com/ +.. _git repository: https://git.libcamera.org/libcamera/libcamera.git/ + +Dependencies +~~~~~~~~~~~~ + +The test application presented here depends on the libcamera library to be +available in a path that meson can identify. The libcamera install procedure +performed using the ``ninja install`` command may by default deploy the +libcamera components in the ``/usr/local/lib`` path, or a package manager may +install it to ``/usr/lib`` depending on your distribution. If meson is unable to +find the location of the libcamera installation, you may need to instruct meson +to look into a specific path when searching for ``libcamera.so`` by setting the +``PKG_CONFIG_PATH`` environment variable to the right location. + +Adjust the following command to use the ``pkgconfig`` directory where libcamera +has been installed in your system. + +.. code:: shell + + export PKG_CONFIG_PATH=/usr/local/lib/pkgconfig/ + +Verify that ``pkg-config`` can identify the ``libcamera`` library with + +.. code:: shell + + $ pkg-config --libs --cflags libcamera + -I/usr/local/include/libcamera -L/usr/local/lib -lcamera -lcamera-base + +``meson`` can alternatively use ``cmake`` to locate packages, please refer to +the ``meson`` documentation if you prefer to use it in place of ``pkgconfig`` + +Build file +~~~~~~~~~~ + +With the dependencies correctly identified, prepare a ``meson.build`` build file +to be placed in the same directory where the application lives. You can +name your application as you like, but be sure to update the following snippet +accordingly. In this example, the application file has been named +``simple-cam.cpp``. + +.. code:: + + project('simple-cam', 'cpp') + + simple_cam = executable('simple-cam', + 'simple-cam.cpp', + dependencies: dependency('libcamera', required : true)) + +The ``dependencies`` line instructs meson to ask ``pkgconfig`` (or ``cmake``) to +locate the ``libcamera`` library, which the test application will be +dynamically linked against. + +With the build file in place, compile and run the application with: + +.. code:: shell + + $ meson build + $ cd build + $ ninja + $ ./simple-cam + +It is possible to increase the library debug output by using environment +variables which control the library log filtering system: + +.. code:: shell + + $ LIBCAMERA_LOG_LEVELS=0 ./simple-cam diff --git a/spider-cam/libcamera/Documentation/guides/introduction.rst b/spider-cam/libcamera/Documentation/guides/introduction.rst new file mode 100644 index 0000000..700ec2d --- /dev/null +++ b/spider-cam/libcamera/Documentation/guides/introduction.rst @@ -0,0 +1,319 @@ +.. 
SPDX-License-Identifier: CC-BY-SA-4.0
+
+Developers guide to libcamera
+=============================
+
+The Linux kernel handles multimedia devices through the 'Linux media' subsystem
+and provides a set of APIs (application programming interfaces) known
+collectively as V4L2 (`Video for Linux 2`_) and the `Media Controller`_ API
+which provide an interface to interact and control media devices.
+
+Included in this subsystem are drivers for camera sensors, CSI2 (Camera
+Serial Interface) receivers, and ISPs (Image Signal Processors).
+
+The usage of these drivers to provide a functioning camera stack is a
+responsibility that lies in userspace, which is commonly implemented separately
+by vendors without a common architecture or API for application developers.
+
+libcamera provides a complete camera stack for Linux-based systems to abstract
+functionality desired by camera application developers and process the
+configuration of hardware and image control algorithms required to obtain
+desirable results from the camera.
+
+.. _Video for Linux 2: https://www.linuxtv.org/downloads/v4l-dvb-apis-new/userspace-api/v4l/v4l2.html
+.. _Media Controller: https://www.linuxtv.org/downloads/v4l-dvb-apis-new/userspace-api/mediactl/media-controller.html
+
+
+In this developers guide, we will explore the `Camera Stack`_ and how it can
+be visualised at a high level, and explore the internal `Architecture`_ of
+the libcamera library with its components. The current `Platform Support`_ is
+detailed, as well as an overview of the `Licensing`_ requirements of the
+project.
+
+This introduction is followed by a walkthrough tutorial for newcomers wishing to
+support a new platform with the `Pipeline Handler Writers Guide`_ and, for those
+looking to make use of the libcamera native API, an `Application Writers Guide`_
+provides a tutorial of the key APIs exposed by libcamera.
+
+.. _Pipeline Handler Writers Guide: pipeline-handler.html
+.. _Application Writers Guide: application-developer.html
+
+.. TODO: Correctly link to the other articles of the guide
+
+Camera Stack
+------------
+
+The libcamera library is implemented in userspace, and makes use of underlying
+kernel drivers that directly interact with hardware.
+
+Applications can make use of libcamera through the native `libcamera API`_ or
+through an adaptation layer integrating libcamera into a larger framework.
+
+..
_libcamera API: https://www.libcamera.org/api-html/index.html + +:: + + Application Layer + / +--------------+ +--------------+ +--------------+ +--------------+ + | | Native | | Framework | | Native | | Android | + | | V4L2 | | Application | | libcamera | | Camera | + | | Application | | (gstreamer) | | Application | | Framework | + \ +--------------+ +--------------+ +--------------+ +--------------+ + + ^ ^ ^ ^ + | | | | + | | | | + v v | v + Adaptation Layer | + / +--------------+ +--------------+ | +--------------+ + | | V4L2 | | gstreamer | | | Android | + | | Compatibility| | element | | | Camera | + | | (preload) | |(libcamerasrc)| | | HAL | + \ +--------------+ +--------------+ | +--------------+ + | + ^ ^ | ^ + | | | | + | | | | + v v v v + libcamera Framework + / +--------------------------------------------------------------------+ + | | | + | | libcamera | + | | | + \ +--------------------------------------------------------------------+ + + ^ ^ ^ + Userspace | | | + --------------------- | ---------------- | ---------------- | --------------- + Kernel | | | + v v v + + +-----------+ +-----------+ +-----------+ + | Media | <--> | Video | <--> | V4L2 | + | Device | | Device | | Subdev | + +-----------+ +-----------+ +-----------+ + +The camera stack comprises of four software layers. From bottom to top: + +* The kernel drivers control the camera hardware and expose a low-level + interface to userspace through the Linux kernel V4L2 family of APIs + (Media Controller API, V4L2 Video Device API and V4L2 Subdev API). + +* The libcamera framework is the core part of the stack. It handles all control + of the camera devices in its core component, libcamera, and exposes a native + C++ API to upper layers. + +* The libcamera adaptation layer is an umbrella term designating the components + that interface to libcamera in other frameworks. Notable examples are the V4L2 + compatibility layer, the gstreamer libcamera element, and the Android camera + HAL implementation based on libcamera which are provided as a part of the + libcamera project. + +* The applications and upper level frameworks are based on the libcamera + framework or libcamera adaptation, and are outside of the scope of the + libcamera project, however example native applications (cam, qcam) are + provided for testing. + + +V4L2 Compatibility Layer + V4L2 compatibility is achieved through a shared library that traps all + accesses to camera devices and routes them to libcamera to emulate high-level + V4L2 camera devices. It is injected in a process address space through + ``LD_PRELOAD`` and is completely transparent for applications. + + The compatibility layer exposes camera device features on a best-effort basis, + and aims for the level of features traditionally available from a UVC camera + designed for video conferencing. + +Android Camera HAL + Camera support for Android is achieved through a generic Android camera HAL + implementation on top of libcamera. The HAL implements features required by + Android and out of scope from libcamera, such as JPEG encoding support. + + This component is used to provide support for ChromeOS platforms + +GStreamer element (gstlibcamerasrc) + A `GStreamer element`_ is provided to allow capture from libcamera supported + devices through GStreamer pipelines, and connect to other elements for further + processing. + + Development of this element is ongoing and is limited to a single stream. 
+ +Native libcamera API + Applications can make use of the libcamera API directly using the C++ + API. An example application and walkthrough using the libcamera API can be + followed in the `Application Writers Guide`_ + +.. _GStreamer element: https://gstreamer.freedesktop.org/documentation/application-development/basics/elements.html + +Architecture +------------ + +While offering a unified API towards upper layers, and presenting itself as a +single library, libcamera isn't monolithic. It exposes multiple components +through its public API and is built around a set of separate helpers internally. +Hardware abstractions are handled through the use of device-specific components +where required and dynamically loadable plugins are used to separate image +processing algorithms from the core libcamera codebase. + +:: + + --------------------------< libcamera Public API >--------------------------- + ^ ^ + | | + v v + +-------------+ +---------------------------------------------------+ + | Camera | | Camera Device | + | Manager | | +-----------------------------------------------+ | + +-------------+ | | Device-Agnostic | | + ^ | | | | + | | | +--------------------------+ | + | | | | ~~~~~~~~~~~~~~~~~~~~~~~ | + | | | | { +-----------------+ } | + | | | | } | //// Image //// | { | + | | | | <-> | / Processing // | } | + | | | | } | / Algorithms // | { | + | | | | { +-----------------+ } | + | | | | ~~~~~~~~~~~~~~~~~~~~~~~ | + | | | | ========================== | + | | | | +-----------------+ | + | | | | | // Pipeline /// | | + | | | | <-> | /// Handler /// | | + | | | | | /////////////// | | + | | +--------------------+ +-----------------+ | + | | Device-Specific | + | +---------------------------------------------------+ + | ^ ^ + | | | + v v v + +--------------------------------------------------------------------+ + | Helpers and Support Classes | + | +-------------+ +-------------+ +-------------+ +-------------+ | + | | MC & V4L2 | | Buffers | | Sandboxing | | Plugins | | + | | Support | | Allocator | | IPC | | Manager | | + | +-------------+ +-------------+ +-------------+ +-------------+ | + | +-------------+ +-------------+ | + | | Pipeline | | ... | | + | | Runner | | | | + | +-------------+ +-------------+ | + +--------------------------------------------------------------------+ + + /// Device-Specific Components + ~~~ Sandboxing + + +Camera Manager + The Camera Manager enumerates cameras and instantiates Pipeline Handlers to + manage each Camera that libcamera supports. The Camera Manager supports + hotplug detection and notification events when supported by the underlying + kernel devices. + + There is only ever one instance of the Camera Manager running per application. + Each application's instance of the Camera Manager ensures that only a single + application can take control of a camera device at once. + + Read the `Camera Manager API`_ documentation for more details. + +.. _Camera Manager API: https://libcamera.org/api-html/classlibcamera_1_1CameraManager.html + +Camera Device + The Camera class represents a single item of camera hardware that is capable + of producing one or more image streams, and provides the API to interact with + the underlying device. + + If a system has multiple instances of the same hardware attached, each has its + own instance of the camera class. 
+ + The API exposes full control of the device to upper layers of libcamera through + the public API, making it the highest level object libcamera exposes, and the + object that all other API operations interact with from configuration to + capture. + + Read the `Camera API`_ documentation for more details. + +.. _Camera API: https://libcamera.org/api-html/classlibcamera_1_1Camera.html + +Pipeline Handler + The Pipeline Handler manages the complex pipelines exposed by the kernel + drivers through the Media Controller and V4L2 APIs. It abstracts pipeline + handling to hide device-specific details from the rest of the library, and + implements both pipeline configuration based on stream configuration, and + pipeline runtime execution and scheduling when needed by the device. + + The Pipeline Handler lives in the same process as the rest of the library, and + has access to all helpers and kernel camera-related devices. + + Hardware abstraction is handled by device-specific Pipeline Handlers, which are + derived from the Pipeline Handler base class, allowing commonality to be shared + among the implementations. + + Derived pipeline handlers create Camera device instances based on the devices + they detect and support on the running system, and are responsible for + managing the interactions with a camera device. + + More details can be found in the `PipelineHandler API`_ documentation, and the + `Pipeline Handler Writers Guide`_. + +.. _PipelineHandler API: https://libcamera.org/api-html/classlibcamera_1_1PipelineHandler.html + +Image Processing Algorithms + An image processing algorithm (IPA) component is a loadable plugin that + implements 3A (Auto-Exposure, Auto-White Balance, and Auto-Focus) and other + algorithms. + + The algorithms run on the CPU and interact with the camera devices through the + Pipeline Handler to control hardware image processing based on the parameters + supplied by upper layers, maintaining state and closing the control loop + of the ISP. + + The component is sandboxed and can only interact with libcamera through the + API provided by the Pipeline Handler; an IPA has no direct access to kernel + camera devices. + + Open source IPA modules built with libcamera can be run in the same process + space as libcamera; however, external IPA modules are run in a separate process + from the main libcamera process. IPA modules have a restricted view of the + system, including no access to networking APIs and limited access to file + systems. + + IPA modules are only required for platforms and devices with an ISP controlled + by the host CPU. Camera sensors which have an integrated ISP are not + controlled through the IPA module. + +Platform Support +---------------- + +The library currently supports the following hardware platforms specifically +with dedicated pipeline handlers: + + - Intel IPU3 (ipu3) + - Rockchip RK3399 (rkisp1) + - Raspberry Pi 3 and 4 (rpi/vc4) + +Furthermore, generic platform support is provided for the following: + + - USB video device class cameras (uvcvideo) + - iMX7, Allwinner Sun6i (simple) + - Virtual media controller driver for test use cases (vimc) + +Licensing +--------- + +The libcamera core is covered by the `LGPL-2.1-or-later`_ license. Pipeline +Handlers are a part of the libcamera code base and need to be contributed +upstream by device vendors.
IPA modules included in libcamera are covered by a +free software license; however, third parties may develop IPA modules outside of +libcamera and distribute them under a closed-source license, provided they do +not include source code from the libcamera project. + +The libcamera project itself contains multiple libraries, applications and +utilities. Licenses are expressed through SPDX tags in text-based files that +support comments, and through the .reuse/dep5 file otherwise. A copy of all +licenses is stored in the LICENSES directory, and a full summary of the +licensing used throughout the project can be found in the COPYING.rst document. + +Applications which link dynamically against libcamera and use only the public +API are an independent work of the authors and have no license restrictions +imposed upon them by libcamera. + +.. _LGPL-2.1-or-later: https://spdx.org/licenses/LGPL-2.1-or-later.html diff --git a/spider-cam/libcamera/Documentation/guides/ipa.rst b/spider-cam/libcamera/Documentation/guides/ipa.rst new file mode 100644 index 0000000..25deade --- /dev/null +++ b/spider-cam/libcamera/Documentation/guides/ipa.rst @@ -0,0 +1,531 @@ +.. SPDX-License-Identifier: CC-BY-SA-4.0 + +IPA Writer's Guide +================== + +IPA modules are Image Processing Algorithm modules. They provide functionality +that the pipeline handler can use for image processing. + +This guide covers the definition of the IPA interface, and how to plumb the +connection between the pipeline handler and the IPA. + +The IPA interface and protocol +------------------------------ + +The IPA interface defines the interface between the pipeline handler and the +IPA. Specifically, it defines the functions that the IPA exposes that the +pipeline handler can call, and the signals that the pipeline handler can +connect to, in order to receive data from the IPA asynchronously. In addition, +it contains any custom data structures that the pipeline handler and IPA may +pass to each other. + +It is possible to use the same IPA interface with multiple pipeline handlers +on different hardware platforms. Generally in such cases, these platforms would +have a common hardware ISP pipeline. For instance, the rkisp1 pipeline handler +supports both the RK3399 and the i.MX8MP as they integrate the same ISP. +However, the i.MX8MP has a more complex camera pipeline, which may call for a +dedicated pipeline handler in the future. As the ISP is the same as for RK3399, +the same IPA interface could be used for both pipeline handlers. The build files +provide a mapping from pipeline handler to the IPA interface name as detailed in +:ref:`compiling-section`. + +The IPA protocol refers to the agreement between the pipeline handler and the +IPA regarding the expected response(s) from the IPA for given calls to the IPA. +This protocol doesn't need to be declared anywhere in code, but it shall be +documented, as there may be multiple IPA implementations for one pipeline +handler. + +As part of the design of libcamera, IPAs may be isolated in a separate process, +or run in the same process but a different thread from libcamera. The pipeline +handler and IPA shall not have to change their operation based on whether the +IPA is isolated or not, but the possibility of isolation needs to be kept in +mind.
Therefore all data that is passed between them must be serializable, so +they must be defined separately in the `mojo Interface Definition Language`_ +(IDL), and a code generator will generate headers and serializers corresponding +to the definitions. Every interface is defined in a mojom file and includes: + +- the functions that the pipeline handler can call from the IPA +- signals in the pipeline handler that the IPA can emit +- any data structures that are to be passed between the pipeline handler and the IPA + +All IPA modules of a given pipeline handler use the same IPA interface. The IPA +interface definition is thus written by the pipeline handler author, based on +how they design the interactions between the pipeline handler and the IPA. + +The entire IPA interface, including the functions, signals, and any custom +structs shall be defined in a file named {interface_name}.mojom under +include/libcamera/ipa/. + +.. _mojo Interface Definition Language: https://chromium.googlesource.com/chromium/src.git/+/master/mojo/public/tools/bindings/README.md + +Namespacing +----------- + +To avoid name collisions between data types defined by different IPA interfaces +and data types defined by libcamera, each IPA interface must be defined in its +own namespace. + +The namespace is specified with mojo's module directive. It must be the first +non-comment line in the mojo data definition file. For example, the Raspberry +Pi IPA interface uses: + +.. code-block:: none + + module ipa.rpi; + +This will become the ipa::rpi namespace in C++ code. + +Data containers +--------------- + +Since the data passed between the pipeline handler and the IPA must support +serialization, any custom data containers must be defined with the mojo IDL. + +The following libcamera objects are supported in the interface +definition, and may be used as function parameter types or struct field types: + +- libcamera.ControlInfoMap +- libcamera.ControlList +- libcamera.FileDescriptor +- libcamera.IPABuffer +- libcamera.IPACameraSensorInfo +- libcamera.IPASettings +- libcamera.IPAStream +- libcamera.Point +- libcamera.Rectangle +- libcamera.Size +- libcamera.SizeRange + +To use them, core.mojom must be included in the mojo data definition file: + +.. code-block:: none + + import "include/libcamera/ipa/core.mojom"; + +Other custom structs may be defined and used as well. There is no requirement +that they must be defined before usage. Both enums and structs are supported. + +The following is an example of a definition of an enum, for the purpose of +being used as flags: + +.. code-block:: none + + enum ConfigParameters { + ConfigLsTable = 0x01, + ConfigStaggeredWrite = 0x02, + ConfigSensor = 0x04, + ConfigDropFrames = 0x08, + }; + +The following is an example of a definition of a struct: + +.. code-block:: none + + struct ConfigInput { + uint32 op; + uint32 transform; + libcamera.FileDescriptor lsTableHandle; + int32 lsTableHandleStatic = -1; + map<uint32, libcamera.IPAStream> streamConfig; + array<libcamera.IPABuffer> buffers; + }; + +This example illustrates a few special cases. First, it uses the +FileDescriptor data type. This type must be used to ensure that the file +descriptor that it contains is translated properly across the IPC boundary +(when the IPA is in an isolated process). + +This does mean that if the file descriptor should be sent without being +translated (for example, for the IPA to tell the pipeline handler which +fd *that the pipeline handler holds* to act on), then it must be in a +regular int32 type.
+ +This example also illustrates that struct fields may have default values, as +is assigned to lsTableHandleStatic. This is the value that the field will +take when the struct is constructed with the default constructor. + +Arrays and maps are supported as well. They are translated to C++ vectors and +maps, respectively. The members of the arrays and maps are embedded, and cannot +be const. + +Note that nullable fields, static-length arrays, handles, and unions, which +are supported by mojo, are not supported by our code generator. + +The Main IPA interface +---------------------- + +The IPA interface is split in two parts, the Main IPA interface, which +describes the functions that the pipeline handler can call from the IPA, +and the Event IPA interface, which describes the signals received by the +pipeline handler that the IPA can emit. Both must be defined. This section +focuses on the Main IPA interface. + +The main interface must be named as IPA{interface_name}Interface. + +The functions that the pipeline handler can call from the IPA may be +synchronous or asynchronous. Synchronous functions do not return until the IPA +returns from the function, while asynchronous functions return immediately +without waiting for the IPA to return. + +At a minimum, the following three functions must be present (and implemented): + +- init(); +- start(); +- stop(); + +All three of these functions are synchronous. The parameters for start() and +init() may be customized. + +init() initializes the IPA interface. It shall be called before any other +function of the IPAInterface. + +stop() informs the IPA module that the camera is stopped. The IPA module shall +release resources prepared in start(). + +A configure() function is recommended. Any ControlInfoMap instances that will be +used by the IPA must be sent to the IPA from the pipeline handler, at configure +time, for example. + +All input parameters will become const references, except for arithmetic types, +which will be passed by value. Output parameters will become pointers, unless +the first output parameter is an int32, or there is only one primitive output +parameter, in which case it will become a regular return value. + +const is not allowed inside of arrays and maps. mojo arrays will become C++ +std::vector<>. + +By default, all functions defined in the main interface are synchronous. This +means that in the case of IPC (i.e. isolated IPA), the function call will not +return until the return value or output parameters are ready. To specify an +asynchronous function, the [async] attribute can be used. Asynchronous +functions must not have any return value or output parameters, since in the +case of IPC the call needs to return immediately. + +It is also possible that the IPA will not be run in isolation. In this case, +the IPA thread will not exist until start() is called. This means that in the +case of no isolation, asynchronous calls cannot be made before start(). Since +the IPA interface must be the same regardless of isolation, the same +restriction applies to the case of isolation, and any function that will be +called before start() must be synchronous. + +In addition, any call made after start() and before stop() must be +asynchronous. The motivation for this is to avoid damaging real-time +performance of the pipeline handler. If the pipeline handler wants some data +from the IPA, the IPA should return the data asynchronously via an event +(see "The Event IPA interface"). 
+ +The following is an example of a main interface definition: + +.. code-block:: none + + interface IPARPiInterface { + init(libcamera.IPASettings settings, string sensorName) + => (int32 ret, bool metadataSupport); + start() => (int32 ret); + stop(); + + configure(libcamera.IPACameraSensorInfo sensorInfo, + map<uint32, libcamera.IPAStream> streamConfig, + map<uint32, libcamera.ControlInfoMap> entityControls, + ConfigInput ipaConfig) + => (int32 ret, ConfigOutput results); + + mapBuffers(array<libcamera.IPABuffer> buffers); + unmapBuffers(array<uint32> ids); + + [async] signalStatReady(uint32 bufferId); + [async] signalQueueRequest(libcamera.ControlList controls); + [async] signalIspPrepare(ISPConfig data); + }; + + +The first three functions are the required functions. Functions do not need to +have return values, as shown by stop(), mapBuffers(), and unmapBuffers(). In the case +of asynchronous functions, as explained before, they *must not* have return +values. + +The Event IPA interface +----------------------- + +The event IPA interface describes the signals received by the pipeline handler +that the IPA can emit. It must be defined. If there are no event functions, +then it may be empty. These emissions are meant to notify the pipeline handler +of some event, such as request data being ready, and *must not* be used to drive +the camera pipeline from the IPA. + +The event interface must be named as IPA{interface_name}EventInterface. + +Functions defined in the event interface are implicitly asynchronous. +Thus they cannot return any value. Specifying the [async] tag is not +necessary. + +Functions defined in the event interface will become signals in the IPA +interface. The IPA can emit signals, while the pipeline handler can connect +slots to them. + +The following is an example of an event interface definition: + +.. code-block:: none + + interface IPARPiEventInterface { + statsMetadataComplete(uint32 bufferId, + libcamera.ControlList controls); + runIsp(uint32 bufferId); + embeddedComplete(uint32 bufferId); + setIsp(libcamera.ControlList controls); + setStaggered(libcamera.ControlList controls); + }; + +.. _compiling-section: + +Compiling the IPA interface +--------------------------- + +After the IPA interface is defined in include/libcamera/ipa/{interface_name}.mojom, +an entry for it must be added in meson so that it can be compiled. The filename +must be added to the pipeline_ipa_mojom_mapping variable in +include/libcamera/ipa/meson.build. This variable maps the pipeline handler name +to its IPA interface file. + +For example, adding the raspberrypi.mojom file to meson: + +.. code-block:: none + + pipeline_ipa_mojom_mapping = { + 'rpi/vc4': 'raspberrypi.mojom', + } + +This will cause the mojo data definition file to be compiled. Specifically, it +generates five files: + +- a header describing the custom data structures, and the complete IPA + interface (at {$build_dir}/include/libcamera/ipa/{interface}_ipa_interface.h) + +- a serializer implementing de/serialization for the custom data structures (at + {$build_dir}/include/libcamera/ipa/{interface}_ipa_serializer.h) + +- a proxy header describing a specialized IPA proxy (at + {$build_dir}/include/libcamera/ipa/{interface}_ipa_proxy.h) + +- a proxy source implementing the IPA proxy (at + {$build_dir}/src/libcamera/proxy/{interface}_ipa_proxy.cpp) + +- a proxy worker source implementing the other end of the IPA proxy (at + {$build_dir}/src/libcamera/proxy/worker/{interface}_ipa_proxy_worker.cpp) + +The IPA proxy serves as the layer between the pipeline handler and the IPA, and +handles threading vs isolation transparently.
The pipeline handler and the IPA +only require the interface header and the proxy header. The serializer is only +used internally by the proxy. + +Using the custom data structures +-------------------------------- + +To use the custom data structures that are defined in the mojo data definition +file, the following header must be included: + +.. code-block:: C++ + + #include <libcamera/ipa/{interface_name}_ipa_interface.h> + +The POD types of the structs simply become their C++ counterparts, e.g. uint32 +in mojo will become uint32_t in C++. mojo map becomes C++ std::map, and mojo +array becomes C++ std::vector. All members of maps and vectors are embedded, +and are not pointers. The members cannot be const. + +The names of all the fields of structs can be used in C++ in exactly the same +way as they are defined in the data definition file. For example, the following +struct as defined in the mojo file: + +.. code-block:: none + + struct SensorConfig { + uint32 gainDelay = 1; + uint32 exposureDelay; + uint32 sensorMetadata; + }; + +Will become this in C++: + +.. code-block:: C++ + + struct SensorConfig { + uint32_t gainDelay; + uint32_t exposureDelay; + uint32_t sensorMetadata; + }; + +The generated structs will also have two constructors, a constructor that +fills all fields with the default values, and a second constructor that takes +a value for every field. The default value constructor will fill in the fields +with the specified default value if it exists. In the above example, `gainDelay` +will be initialized to 1. If no default value is specified, then it will be +filled in as zero (or -1 for a FileDescriptor type). + +All fields and constructors/destructors in these generated structs are public. + +Using the IPA interface (pipeline handler) +------------------------------------------ + +The following headers are necessary to use an IPA in the pipeline handler +(with raspberrypi as an example): + +.. code-block:: C++ + + #include <libcamera/ipa/raspberrypi_ipa_interface.h> + #include <libcamera/ipa/raspberrypi_ipa_proxy.h> + +The first header includes definitions of the custom data structures, and +the definition of the complete IPA interface (including both the Main and +the Event IPA interfaces). The name of the header file comes from the name +of the mojom file, which in this case was raspberrypi.mojom. + +The second header includes the definition of the specialized IPA proxy. It +exposes the complete IPA interface. We will see how to use it in this section. + +In the pipeline handler, we first need to construct a specialized IPA proxy. +From the point of view of the pipeline handler, this is the object that is the +IPA. + +To do so, we invoke the IPAManager: + +.. code-block:: C++ + + std::unique_ptr<ipa::rpi::IPAProxyRPi> ipa_ = + IPAManager::createIPA<ipa::rpi::IPAProxyRPi>(pipe_, 1, 1); + +The ipa::rpi namespace comes from the namespace that we defined in the mojo +data definition file, in the "Namespacing" section. The name of the proxy, +IPAProxyRPi, comes from the name given to the main IPA interface, +IPARPiInterface, in the "The Main IPA interface" section. + +The return value of IPAManager::createIPA shall be error-checked, to confirm +that the returned pointer is not a nullptr. + +After this, before initializing the IPA, slots should be connected to all of +the IPA's signals, as defined in the Event IPA interface: + +..
code-block:: C++ + + ipa_->statsMetadataComplete.connect(this, &RPiCameraData::statsMetadataComplete); + ipa_->runIsp.connect(this, &RPiCameraData::runIsp); + ipa_->embeddedComplete.connect(this, &RPiCameraData::embeddedComplete); + ipa_->setIsp.connect(this, &RPiCameraData::setIsp); + ipa_->setStaggered.connect(this, &RPiCameraData::setStaggered); + +The slot functions have a function signature based on the function definition +in the Event IPA interface. All plain old data (POD) types are as-is (with +their C++ versions, e.g. uint32 -> uint32_t), and all structs are const references. + +For example, for the following entry in the Event IPA interface: + +.. code-block:: none + + statsMetadataComplete(uint32 bufferId, ControlList controls); + +A function with the following function signature shall be connected to the +signal: + +.. code-block:: C++ + + void statsMetadataComplete(uint32_t bufferId, const ControlList &controls); + +After connecting the slots to the signals, the IPA should be initialized +(using the main interface definition example from earlier): + +.. code-block:: C++ + + IPASettings settings{}; + bool metadataSupport; + int ret = ipa_->init(settings, "sensor name", &metadataSupport); + +At this point, any IPA functions that were defined in the Main IPA interface +can be called as if they were regular member functions, for example (based on +the main interface definition example from earlier): + +.. code-block:: C++ + + ipa_->start(); + int ret = ipa_->configure(sensorInfo_, streamConfig, entityControls, ipaConfig, &result); + ipa_->signalStatReady(RPi::BufferMask::STATS | static_cast<uint32_t>(index)); + +Remember that any functions designated as asynchronous *must not* be called +before start(). + +Notice that for both init() and configure(), the first output parameter is a +direct return, since it is an int32, while the other output parameter is a +pointer-based output parameter. + +Using the IPA interface (IPA Module) +------------------------------------ + +The following header is necessary to implement an IPA Module (with raspberrypi +as an example): + +.. code-block:: C++ + + #include <libcamera/ipa/raspberrypi_ipa_interface.h> + +This header includes definitions of the custom data structures, and +the definition of the complete IPA interface (including both the Main and +the Event IPA interfaces). The name of the header file comes from the name +of the mojom file, which in this case was raspberrypi.mojom. + +The IPA module must implement the IPA interface class that is defined in the +header. In the case of our example, that is ipa::rpi::IPARPiInterface. The +ipa::rpi namespace comes from the namespace that we defined in the mojo data +definition file, in the "Namespacing" section. The name of the interface is the +same as the name given to the Main IPA interface. + +The function signature rules are the same as for the slots on the pipeline +handler side; PODs are passed by value, and structs are passed by const +reference. For the Main IPA interface, output values are also allowed (only +for synchronous calls), so there may be output parameters as well. If the +first output parameter is a POD it will be returned by value, otherwise +it will be returned by an output parameter pointer. The second and any other +output parameters will also be returned by output parameter pointers. + +For example, for the following function specification in the Main IPA interface +definition: + +..
code-block:: none + + configure(libcamera.IPACameraSensorInfo sensorInfo, + uint32 exampleNumber, + map<uint32, libcamera.IPAStream> streamConfig, + map<uint32, libcamera.ControlInfoMap> entityControls, + ConfigInput ipaConfig) + => (int32 ret, ConfigOutput results); + +We will need to implement a function with the following function signature: + +.. code-block:: C++ + + int configure(const IPACameraSensorInfo &sensorInfo, + uint32_t exampleNumber, + const std::map<uint32_t, IPAStream> &streamConfig, + const std::map<uint32_t, ControlInfoMap> &entityControls, + const ipa::rpi::ConfigInput &data, + ipa::rpi::ConfigOutput *response); + +The return value is int, because the first output parameter is int32. The rest +of the output parameters (in this case, only response) become output parameter +pointers. The non-POD input parameters become const references, and the POD +input parameter is passed by value. + +At any time after start() and before stop() (though usually only in response to +an IPA call), the IPA may send data to the pipeline handler by emitting +signals. These signals are defined in the C++ IPA interface class (which is in +the generated and included header). + +For example, for the following function defined in the Event IPA interface: + +.. code-block:: none + + statsMetadataComplete(uint32 bufferId, libcamera.ControlList controls); + +We can emit a signal like so: + +.. code-block:: C++ + + statsMetadataComplete.emit(bufferId & RPi::BufferMask::ID, libcameraMetadata_); diff --git a/spider-cam/libcamera/Documentation/guides/pipeline-handler.rst b/spider-cam/libcamera/Documentation/guides/pipeline-handler.rst new file mode 100644 index 0000000..7e45cdb --- /dev/null +++ b/spider-cam/libcamera/Documentation/guides/pipeline-handler.rst @@ -0,0 +1,1532 @@ +.. SPDX-License-Identifier: CC-BY-SA-4.0 + +Pipeline Handler Writers Guide +============================== + +Pipeline handlers are the abstraction layer for device-specific hardware +configuration. They access and control hardware through the V4L2 and Media +Controller kernel interfaces, and implement an internal API to control the ISP +and capture components of a pipeline directly. + +Prerequisite knowledge: system architecture +------------------------------------------- + +A pipeline handler configures and manages the image acquisition and +transformation pipeline realized by specialized system peripherals combined with +an image source connected to the system through a data and control bus. The +presence, number and characteristics of these components vary depending on the system design +and the product integration of the target platform. + +System components can be classified in three macro-categories: + +.. TODO: Insert references to the open CSI-2 (and other) specification. + +- Input ports: Interfaces to external devices, usually image sensors, + which transfer data from the physical bus to locations accessible by other + system peripherals. An input port needs to be configured according to the + input image format and size and could optionally apply basic transformations + on the received images, most typically cropping/scaling and some format + conversion. The industry standard for the system typically targeted by + libcamera is to have receivers compliant with the MIPI CSI-2 specifications, + implemented on a compatible physical layer such as MIPI D-PHY or MIPI C-PHY. + Other designs are possible but less common, such as LVDS or the legacy BT.601 + and BT.656 parallel protocols. + +- Image Signal Processor (ISP): A specialized media processor which applies + digital transformations on image streams.
ISPs can be integrated as part of + the SoC as a memory interfaced system peripheral or packaged as stand-alone + chips connected to the application processor through a bus. Most hardware used + by libcamera makes use of in-system ISP designs but pipelines can equally + support external ISP chips or be instrumented to use other system resources + such as a GPU or an FPGA IP block. ISPs expose a software programming + interface that allows the configuration of multiple processing blocks which + form an "Image Transformation Pipeline". An ISP usually produces 'processed' + image streams along with the metadata describing the processing steps which + have been applied to generate the output frames. + +- Camera Sensor: Digital components that integrate an image sensor with control + electronics and usually a lens. It interfaces to the SoC image receiver ports + and is programmed to produce images in a format and size suitable for the + current system configuration. Complex camera modules can integrate on-board + ISP or DSP chips and process images before delivering them to the system. Most + systems with a dedicated ISP processor will usually integrate camera sensors + which produce images in Raw Bayer format and defer processing to it. + +It is the responsibility of the pipeline handler to interface with these (and +possibly other) components of the system and implement the following +functionalities: + +- Detect and register camera devices available in the system with an associated + set of image streams. + +- Configure the image acquisition and processing pipeline by assigning the + system resources (memory, shared components, etc.) to satisfy the + configuration requested by the application. + +- Start and stop the image acquisition and processing sessions. + +- Apply configuration settings requested by applications and computed by image + processing algorithms integrated in libcamera to the hardware devices. + +- Notify applications of the availability of new images and deliver them to the + correct locations. + +Prerequisite knowledge: libcamera architecture +---------------------------------------------- + +A pipeline handler makes use of the following libcamera classes to realize the +functionalities described above. Below is a brief overview of each of those: + +.. TODO: (All) Convert to sphinx refs +.. TODO: (MediaDevice) Reference to the Media Device API (possibly with versioning requirements) +.. TODO: (IPAInterface) refer to the IPA guide + +- `MediaDevice `_: + Instances of this class are associated with a kernel media controller + device and its connected objects. + +- `DeviceEnumerator `_: + Enumerates all media devices attached to the system and the media entities + registered with it, by creating instances of the ``MediaDevice`` class and + storing them. + +- `DeviceMatch `_: + Describes a media device search pattern using entity names, or other + properties. + +- `V4L2VideoDevice `_: + Models an instance of a V4L2 video device constructed with the path to a V4L2 + video device node. + +- `V4L2SubDevice `_: + Provides an API to the sub-devices that model the hardware components of a + V4L2 device. + +- `CameraSensor `_: + Abstracts camera sensor handling by hiding the details of the V4L2 subdevice + kernel API and caching sensor information. + +- `Camera::Private `_: + Represents device-specific data a pipeline handler associates to each Camera + instance. 
+ +- `StreamConfiguration `_: + Models the current configuration of an image stream produced by the camera by + reporting its format and sizes. + +- `CameraConfiguration `_: + Represents the current configuration of a camera, which includes a list of + stream configurations for each active stream in a capture session. When + validated, it is applied to the camera. + +- `IPAInterface `_: + The interface to the Image Processing Algorithm (IPA) module which performs + the computation of the image processing pipeline tuning parameters. + +- `ControlList `_: + A list of control items, indexed by Control<> instances or by numerical index, + which contains values used by applications and the IPA to change parameters of + image streams, used to return to applications and share with the IPA the metadata + associated with the captured images, and to advertise the immutable camera + characteristics enumerated at system initialization time. + +Creating a PipelineHandler +-------------------------- + +This guide walks through the steps to create a simple pipeline handler +called "Vivid" that supports the `V4L2 Virtual Video Test Driver`_ (vivid). + +To use the vivid test driver, you first need to check that the vivid kernel +module is loaded, for example with the ``modprobe vivid`` command. + +.. _V4L2 Virtual Video Test Driver: https://www.kernel.org/doc/html/latest/admin-guide/media/vivid.html + +Create the skeleton file structure +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To add a new pipeline handler, create a directory to hold the pipeline code in +the *src/libcamera/pipeline/* directory that matches the name of the pipeline +(in this case *vivid*). Inside the new directory add a *meson.build* file that +integrates with the libcamera build system, and a *vivid.cpp* file that matches +the name of the pipeline. + +In the *meson.build* file, add the *vivid.cpp* file as a build source for +libcamera by adding it to the global meson ``libcamera_sources`` variable: + +.. code-block:: none + + # SPDX-License-Identifier: CC0-1.0 + + libcamera_sources += files([ + 'vivid.cpp', + ]) + +Users of libcamera can selectively enable pipelines while building libcamera +using the ``pipelines`` option. + +For example, to enable only the IPU3, UVC, and VIVID pipelines, specify them as +a comma-separated list with ``-Dpipelines`` when generating a build directory: + +.. code-block:: shell + + meson build -Dpipelines=ipu3,uvcvideo,vivid + +Read the `Meson build configuration`_ documentation for more information on +configuring a build directory. + +.. _Meson build configuration: https://mesonbuild.com/Configuring-a-build-directory.html + +To add the new pipeline handler to this list of options, add its directory name +to the libcamera build options in the top level ``meson_options.txt``. + +.. code-block:: none + + option('pipelines', + type : 'array', + choices : ['ipu3', 'rkisp1', 'rpi/vc4', 'simple', 'uvcvideo', 'vimc', 'vivid'], + description : 'Select which pipeline handlers to include') + + +In *vivid.cpp* add the pipeline handler to the ``libcamera`` namespace, defining +a `PipelineHandler`_ derived class named PipelineHandlerVivid, and add stub +implementations for the overridden class members. + +.. _PipelineHandler: https://libcamera.org/api-html/classlibcamera_1_1PipelineHandler.html + +..
code-block:: cpp + + namespace libcamera { + + class PipelineHandlerVivid : public PipelineHandler + { + public: + PipelineHandlerVivid(CameraManager *manager); + + CameraConfiguration *generateConfiguration(Camera *camera, + Span<const StreamRole> roles) override; + int configure(Camera *camera, CameraConfiguration *config) override; + + int exportFrameBuffers(Camera *camera, Stream *stream, + std::vector<std::unique_ptr<FrameBuffer>> *buffers) override; + + int start(Camera *camera, const ControlList *controls) override; + void stop(Camera *camera) override; + + int queueRequestDevice(Camera *camera, Request *request) override; + + bool match(DeviceEnumerator *enumerator) override; + }; + + PipelineHandlerVivid::PipelineHandlerVivid(CameraManager *manager) + : PipelineHandler(manager) + { + } + + CameraConfiguration *PipelineHandlerVivid::generateConfiguration(Camera *camera, + Span<const StreamRole> roles) + { + return nullptr; + } + + int PipelineHandlerVivid::configure(Camera *camera, CameraConfiguration *config) + { + return -1; + } + + int PipelineHandlerVivid::exportFrameBuffers(Camera *camera, Stream *stream, + std::vector<std::unique_ptr<FrameBuffer>> *buffers) + { + return -1; + } + + int PipelineHandlerVivid::start(Camera *camera, const ControlList *controls) + { + return -1; + } + + void PipelineHandlerVivid::stop(Camera *camera) + { + } + + int PipelineHandlerVivid::queueRequestDevice(Camera *camera, Request *request) + { + return -1; + } + + bool PipelineHandlerVivid::match(DeviceEnumerator *enumerator) + { + return false; + } + + REGISTER_PIPELINE_HANDLER(PipelineHandlerVivid, "vivid") + + } /* namespace libcamera */ + +Note that you must register the ``PipelineHandler`` subclass with the pipeline +handler factory using the `REGISTER_PIPELINE_HANDLER`_ macro, which +registers it and creates a global symbol to reference the class, making it +available for device matching. +The string "vivid" is the name assigned to the pipeline, matching the pipeline +subdirectory name in the source tree. + +.. _REGISTER_PIPELINE_HANDLER: https://libcamera.org/api-html/pipeline__handler_8h.html + +For debugging and testing a pipeline handler during development, you can define +a log message category for the pipeline handler. The ``LOG_DEFINE_CATEGORY`` +macro and ``LIBCAMERA_LOG_LEVELS`` environment variable help you use the inbuilt +libcamera `logging infrastructure`_ that allows for the inspection of internal +operations in a user-configurable way. + +.. _logging infrastructure: https://libcamera.org/api-html/log_8h.html + +Add the following before the ``PipelineHandlerVivid`` class declaration: + +.. code-block:: cpp + + LOG_DEFINE_CATEGORY(VIVID) + +At this point you need the following includes for logging and pipeline handler +features: + +.. code-block:: cpp + + #include <libcamera/base/log.h> + + #include "libcamera/internal/pipeline_handler.h" + +Run the following commands to build the libcamera code base: + +.. code-block:: shell + + meson build + ninja -C build + +Then confirm that the build system found the +new pipeline handler by running: + +.. code-block:: shell + + LIBCAMERA_LOG_LEVELS=Camera:0 ./build/src/cam/cam -l + +And you should see output like the below: + +.. code-block:: shell + + DEBUG Camera camera_manager.cpp:148 Found registered pipeline handler 'PipelineHandlerVivid' + +Matching devices +~~~~~~~~~~~~~~~~ + +Each pipeline handler registered in libcamera gets tested against the current +system configuration, by matching a ``DeviceMatch`` with the system +``DeviceEnumerator``.
A successful match makes sure all the requested components +have been registered in the system and allows the pipeline handler to be +initialized. + +The main entry point of a pipeline handler is the `match()`_ class member +function. When the ``CameraManager`` is started (using the `start()`_ function), +all the registered pipeline handlers are iterated and their ``match`` function +is called with an enumerator of all the devices found on the system. + +The match function should identify if there are suitable devices available in +the ``DeviceEnumerator`` which the pipeline supports, returning ``true`` if it +matches a device, and ``false`` if it does not. To do this, construct a +`DeviceMatch`_ class with the name of the ``MediaController`` device to match. +You can refine the search further by adding specific media entities to the +search using the ``.add()`` function on the DeviceMatch. + +.. _match(): https://www.libcamera.org/api-html/classlibcamera_1_1PipelineHandler.html#a7cd5b652a2414b543ec20ba9dabf61b6 +.. _start(): https://libcamera.org/api-html/classlibcamera_1_1CameraManager.html#a49e322880a2a26013bb0076788b298c5 +.. _DeviceMatch: https://libcamera.org/api-html/classlibcamera_1_1DeviceMatch.html + +This example uses search patterns that match vivid, but when developing a new +pipeline handler, you should change this value to suit your device identifier. + +Replace the contents of the ``PipelineHandlerVivid::match`` function with the +following: + +.. code-block:: cpp + + DeviceMatch dm("vivid"); + dm.add("vivid-000-vid-cap"); + return false; // Prevent infinite loops for now + +With the device matching criteria defined, attempt to acquire exclusive access +to the matching media controller device with the `acquireMediaDevice`_ function. +If the function attempts to acquire a device it has already matched, it returns +``false``. + +.. _acquireMediaDevice: https://libcamera.org/api-html/classlibcamera_1_1PipelineHandler.html#a77e424fe704e7b26094164b9189e0f84 + +Add the following below ``dm.add("vivid-000-vid-cap");``: + +.. code-block:: cpp + + MediaDevice *media = acquireMediaDevice(enumerator, dm); + if (!media) + return false; + +The pipeline handler now needs an additional include. Add the following to the +existing include block for device enumeration functionality: + +.. code-block:: cpp + + #include "libcamera/internal/device_enumerator.h" + +At this stage, you should test that the pipeline handler can successfully match +the devices, but you have not yet added any code to create a Camera which libcamera +reports to applications. + +As a temporary validation step, add a debug print with + +.. code-block:: cpp + + LOG(VIVID, Debug) << "Vivid Device Identified"; + +before the final closing return statement in the ``PipelineHandlerVivid::match`` +function for when the pipeline handler successfully matches the +``MediaDevice`` and ``MediaEntity`` names. + +Test that the pipeline handler matches and finds a device by rebuilding and +running: + +.. code-block:: shell + + ninja -C build + LIBCAMERA_LOG_LEVELS=Pipeline,VIVID:0 ./build/src/cam/cam -l + +And you should see output like the below: + +.. code-block:: shell + + DEBUG VIVID vivid.cpp:74 Vivid Device Identified + +Creating camera devices +~~~~~~~~~~~~~~~~~~~~~~~ + +If the pipeline handler successfully matches with the system it is running on, +it can proceed to initialization, by creating all the required instances of the +``V4L2VideoDevice``, ``V4L2Subdevice`` and ``CameraSensor`` hardware abstraction +classes.
If the pipeline handler supports an ISP, it can then also initialise +the IPA module before proceeding to the creation of the Camera devices. + +An image ``Stream`` represents a sequence of images and data of known size and +format, stored in application-accessible memory locations. Typical examples of +streams are the ISP-processed outputs and the raw images captured at the +receiver's port output. + +The Pipeline Handler is responsible for defining the set of Streams associated +with the Camera. + +Each Camera has instance-specific data represented using the `Camera::Private`_ +class, which can be extended for the specific needs of the pipeline handler. + +.. _Camera::Private: https://libcamera.org/api-html/classlibcamera_1_1Camera_1_1Private.html + + +To support the Camera we will later register, we need to create a Camera::Private +class that we can implement for our specific Pipeline Handler. + +Define a new ``VividCameraData`` class derived from ``Camera::Private`` by +adding the following code before the PipelineHandlerVivid class definition where +it will be used: + +.. code-block:: cpp + + class VividCameraData : public Camera::Private + { + public: + VividCameraData(PipelineHandler *pipe, MediaDevice *media) + : Camera::Private(pipe), media_(media), video_(nullptr) + { + } + + ~VividCameraData() + { + delete video_; + } + + int init(); + void bufferReady(FrameBuffer *buffer); + + MediaDevice *media_; + V4L2VideoDevice *video_; + Stream stream_; + }; + +This example pipeline handler handles a single video device and supports a +single stream, represented by the ``VividCameraData`` class members. More +complex pipeline handlers might register cameras composed of several video +devices and sub-devices, or multiple streams per camera that represent the +several components of the image capture pipeline. You should represent all these +components in the ``Camera::Private`` derived class when developing a custom +PipelineHandler. + +In our example VividCameraData we implement an ``init()`` function to prepare +the object from our PipelineHandler; however, the Camera::Private class does not +specify the interface for initialisation and PipelineHandlers can manage this +based on their own needs. Derived Camera::Private classes are used only by their +respective pipeline handlers. + +The Camera::Private class stores the context required for each camera instance +and is usually responsible for opening all Devices used in the capture pipeline. + +We can now implement the ``init`` function for our example Pipeline Handler to +create a new V4L2 video device from the media entity, which we can specify using +the `MediaDevice::getEntityByName`_ function from the MediaDevice. As our +example is based upon the simplistic Vivid test device, we only need to open a +single capture device, named 'vivid-000-vid-cap' by the driver. + +.. _MediaDevice::getEntityByName: https://libcamera.org/api-html/classlibcamera_1_1MediaDevice.html#ad5d9279329ef4987ceece2694b33e230 + +.. code-block:: cpp + + int VividCameraData::init() + { + video_ = new V4L2VideoDevice(media_->getEntityByName("vivid-000-vid-cap")); + if (video_->open()) + return -ENODEV; + + return 0; + } + +The VividCameraData should be created and initialised before we move on to +register a new Camera device, so we need to construct and initialise our +VividCameraData after we have identified our device within +PipelineHandlerVivid::match(). The VividCameraData is wrapped by a +std::unique_ptr to help manage the lifetime of the instance.
+ +If the camera data initialization fails, return ``false`` to indicate the +failure to the ``match()`` function and prevent retrying of the pipeline +handler. + +.. code-block:: cpp + + std::unique_ptr<VividCameraData> data = std::make_unique<VividCameraData>(this, media); + + if (data->init()) + return false; + + +Once the camera data has been initialized, the Camera device instances and the +associated streams have to be registered. Create a set of streams for the +camera, which for this device is only one. You create a camera using the static +`Camera::create`_ function, passing the Camera::Private instance, the id of the +camera, and the streams available. Then register the camera with the pipeline +handler and camera manager using `registerCamera`_. + +Finally, with a successful construction, we return ``true``, indicating that the +PipelineHandler successfully matched and constructed a device. + +.. _Camera::create: https://libcamera.org/api-html/classlibcamera_1_1Camera.html#a453740e0d2a2f495048ae307a85a2574 +.. _registerCamera: https://libcamera.org/api-html/classlibcamera_1_1PipelineHandler.html#adf02a7f1bbd87aca73c0e8d8e0e6c98b + +.. code-block:: cpp + + std::set<Stream *> streams{ &data->stream_ }; + std::shared_ptr<Camera> camera = Camera::create(this, data->video_->deviceName(), streams); + registerCamera(std::move(camera), std::move(data)); + + return true; + + +Our match function should now look like the following: + +.. code-block:: cpp + + bool PipelineHandlerVivid::match(DeviceEnumerator *enumerator) + { + DeviceMatch dm("vivid"); + dm.add("vivid-000-vid-cap"); + + MediaDevice *media = acquireMediaDevice(enumerator, dm); + if (!media) + return false; + + std::unique_ptr<VividCameraData> data = std::make_unique<VividCameraData>(this, media); + + /* Locate and open the capture video node. */ + if (data->init()) + return false; + + /* Create and register the camera. */ + std::set<Stream *> streams{ &data->stream_ }; + const std::string &id = data->video_->deviceName(); + std::shared_ptr<Camera> camera = Camera::create(data.release(), id, streams); + registerCamera(std::move(camera)); + + return true; + } + +We will need to use our custom VividCameraData class frequently throughout the +pipeline handler, so we add a private convenience helper to our pipeline handler +to obtain and cast the custom VividCameraData instance from a Camera::Private +instance. + +.. code-block:: cpp + + private: + VividCameraData *cameraData(Camera *camera) + { + return static_cast<VividCameraData *>(camera->_d()); + } + +At this point, you need to add the following new includes to provide the Camera +interface and the device interaction interfaces. + +.. code-block:: cpp + + #include <libcamera/camera.h> + #include "libcamera/internal/media_device.h" + #include "libcamera/internal/v4l2_videodevice.h" + +Registering controls and properties +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The libcamera `controls framework`_ allows an application to configure the +streams' capture parameters on a per-frame basis and is also used to advertise +immutable properties of the ``Camera`` device. + +The libcamera controls and properties are defined in YAML form which is +processed to automatically generate documentation and interfaces. Controls are +defined by the src/libcamera/`control_ids_core.yaml`_ file and camera properties +are defined by src/libcamera/`properties_ids_core.yaml`_. + +.. _controls framework: https://libcamera.org/api-html/controls_8h.html +.. _control_ids_core.yaml: https://libcamera.org/api-html/control__ids_8h.html +..
_properties_ids_core.yaml: https://libcamera.org/api-html/property__ids_8h.html + +Pipeline handlers can optionally register the list of controls an application +can set as well as a list of immutable camera properties. Being both +Camera-specific values, they are represented in the ``Camera::Private`` base +class, which provides two members for this purpose: the +`Camera::Private::controlInfo_`_ and the `Camera::Private::properties_`_ fields. + +.. _Camera::Private::controlInfo_: https://libcamera.org/api-html/classlibcamera_1_1Camera_1_1Private.html#ab4e183eb4dabe929d1b2bbbb519b969f +.. _Camera::Private::properties_: https://libcamera.org/api-html/classlibcamera_1_1Camera_1_1Private.html#ad31f12f5ed9c1fbe25750902f4791064 + +The ``controlInfo_`` field represents a map of ``ControlId`` instances +associated with the limits of valid values supported for the control. More +information can be found in the `ControlInfoMap`_ class documentation. + +.. _ControlInfoMap: https://libcamera.org/api-html/classlibcamera_1_1ControlInfoMap.html + +Pipeline handlers register controls to expose the tunable device and IPA +parameters to applications. Our example pipeline handler only exposes trivial +controls of the video device, by registering a ``ControlId`` instance with +associated values for each supported V4L2 control but demonstrates the mapping +of V4L2 Controls to libcamera ControlIDs. + +Complete the initialization of the ``VividCameraData`` class by adding the +following code to the ``VividCameraData::init()`` function to initialise the +controls. For more complex control configurations, this could of course be +broken out to a separate function, but for now we just initialise the small set +inline in our VividCameraData init: + +.. code-block:: cpp + + /* Initialise the supported controls. */ + const ControlInfoMap &controls = video_->controls(); + ControlInfoMap::Map ctrls; + + for (const auto &ctrl : controls) { + const ControlId *id; + ControlInfo info; + + switch (ctrl.first->id()) { + case V4L2_CID_BRIGHTNESS: + id = &controls::Brightness; + info = ControlInfo{ { -1.0f }, { 1.0f }, { 0.0f } }; + break; + case V4L2_CID_CONTRAST: + id = &controls::Contrast; + info = ControlInfo{ { 0.0f }, { 2.0f }, { 1.0f } }; + break; + case V4L2_CID_SATURATION: + id = &controls::Saturation; + info = ControlInfo{ { 0.0f }, { 2.0f }, { 1.0f } }; + break; + default: + continue; + } + + ctrls.emplace(id, info); + } + + controlInfo_ = ControlInfoMap(std::move(ctrls), controls::controls); + +The ``properties_`` field is a list of ``ControlId`` instances +associated with immutable values, which represent static characteristics that can +be used by applications to identify camera devices in the system. Properties can be +registered by inspecting the values of V4L2 controls from the video devices and +camera sensor (for example to retrieve the position and orientation of a camera) +or to express other immutable characteristics. The example pipeline handler does +not register any property, but examples are available in the libcamera code +base. + +.. TODO: Add a property example to the pipeline handler. At least the model. + +At this point you need to add the following includes to the top of the file for +handling controls: + +.. 
code-block:: cpp + + #include <libcamera/controls.h> + #include <libcamera/control_ids.h> + +Vendor-specific controls and properties +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Vendor-specific controls and properties must be defined in a separate YAML file +and included in the build by defining the pipeline handler to file mapping in +include/libcamera/meson.build. These YAML files live in the src/libcamera +directory. + +For example, adding a Raspberry Pi vendor control file for the PiSP pipeline +handler is done with the following mapping: + +.. code-block:: meson + + controls_map = { + 'controls': { + 'draft': 'control_ids_draft.yaml', + 'libcamera': 'control_ids_core.yaml', + 'rpi/pisp': 'control_ids_rpi.yaml', + }, + + 'properties': { + 'draft': 'property_ids_draft.yaml', + 'libcamera': 'property_ids_core.yaml', + } + } + +The pipeline handler named above must match the pipeline handler option string +specified in the meson build configuration. + +Vendor-specific controls and properties must contain a `vendor:` +tag in the YAML file. Every unique vendor tag must define a unique and +non-overlapping range of reserved control IDs in src/libcamera/control_ranges.yaml. + +For example, the following block defines a vendor-specific control with the +`rpi` vendor tag: + +.. code-block:: yaml + + vendor: rpi + controls: + - PispConfigDumpFile: + type: string + description: | + Triggers the Raspberry Pi PiSP pipeline handler to generate a JSON + formatted dump of the Backend configuration to the filename given by the + value of the control. + +The controls will be generated in the vendor-specific namespace +`libcamera::controls::rpi`. Additionally, a `#define +LIBCAMERA_HAS_RPI_VENDOR_CONTROLS` will be available to allow applications to +test for the availability of these controls. + +Generating a default configuration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Once ``Camera`` devices and the associated ``Streams`` have been registered, an +application can proceed to acquire and configure the camera to prepare it for a +frame capture session. + +Applications specify the requested configuration by assigning a +``StreamConfiguration`` instance to each stream they want to enable, which +expresses the desired image size and pixel format. The stream configurations are +grouped in a ``CameraConfiguration`` which is inspected by the pipeline handler +and validated to adjust it to a supported configuration. This may involve +adjusting the formats, image sizes or alignments, for example, to match the +capabilities of the device. + +Applications may choose to repeat validation stages, adjusting parameters until +a validated set of StreamConfigurations is returned that is acceptable for the +application's needs. When the pipeline handler receives a valid camera +configuration it can use the image stream configurations to apply settings to +the hardware devices. + +This configuration and validation process is managed with another +pipeline-specific class derived from a common base implementation and interface. + +To support validation in our example pipeline handler, create a new class called +``VividCameraConfiguration`` derived from the base `CameraConfiguration`_ class, +which we can implement and use within our ``PipelineHandlerVivid`` class. + +.. _CameraConfiguration: https://libcamera.org/api-html/classlibcamera_1_1CameraConfiguration.html + +The derived ``CameraConfiguration`` class must override the base class +``validate()`` function, where the stream configuration inspection and +adjustment happens. + +..
code-block:: cpp + + class VividCameraConfiguration : public CameraConfiguration + { + public: + VividCameraConfiguration(); + + Status validate() override; + }; + + VividCameraConfiguration::VividCameraConfiguration() + : CameraConfiguration() + { + } + +Applications generate a ``CameraConfiguration`` instance by calling the +`Camera::generateConfiguration()`_ function, which calls into the pipeline +implementation of the overridden `PipelineHandler::generateConfiguration()`_ +function. + +.. _Camera::generateConfiguration(): https://libcamera.org/api-html/classlibcamera_1_1Camera.html#a25c80eb7fc9b1cf32692ce0c7f09991d +.. _PipelineHandler::generateConfiguration(): https://libcamera.org/api-html/classlibcamera_1_1PipelineHandler.html#a7932e87735695500ce1f8c7ae449b65b + +Configurations are generated by receiving a list of ``StreamRole`` instances, +which libcamera uses as predefined ways an application intends to use a camera +(you can read the full list in the `StreamRole API`_ documentation). These are +optional hints on how an application intends to use a stream, and a pipeline +handler should return an ideal configuration for each role that is requested. + +.. _StreamRole API: https://libcamera.org/api-html/stream_8h.html#file_a295d1f5e7828d95c0b0aabc0a8baac03 + +In the pipeline handler ``generateConfiguration`` implementation, remove the +``return nullptr;``, create a new instance of the ``CameraConfiguration`` +derived class, and assign it to a base class pointer. + +.. code-block:: cpp + + VividCameraData *data = cameraData(camera); + CameraConfiguration *config = new VividCameraConfiguration(); + +A ``CameraConfiguration`` is specific to each pipeline, so you can only create +it from the pipeline handler code path. Applications can also generate an empty +configuration and add desired stream configurations manually. Pipelines must +allow for this by returning an empty configuration if no roles are requested. + +To support this in our PipelineHandlerVivid, next add the following check in +``generateConfiguration`` after the CameraConfiguration has been constructed: + +.. code-block:: cpp + + if (roles.empty()) + return config; + +A production pipeline handler should generate the ``StreamConfiguration`` for +all the appropriate stream roles a camera device supports. For this simpler +example (with only one stream), the pipeline handler always returns the same +configuration, inferred from the underlying V4L2VideoDevice. + +How it does this is shown below, but examination of the more full-featured +pipelines for IPU3, RKISP1 and Raspberry Pi is recommended to explore more +complex examples. + +To generate a ``StreamConfiguration``, you need a list of pixel formats and +frame sizes which are supported as outputs of the stream. You can fetch a map of +the ``V4L2PixelFormat`` and ``SizeRange`` supported by the underlying output +device, but the pipeline handler needs to convert this to a +``libcamera::PixelFormat`` type to pass to applications. We do this here using +``std::transform`` to convert the formats and populate a new ``PixelFormat`` map +as shown below. + +Continue adding the following code example to our ``generateConfiguration`` +implementation. + +..
+.. code-block:: cpp
+
+   std::map<V4L2PixelFormat, std::vector<SizeRange>> v4l2Formats =
+           data->video_->formats();
+   std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
+   std::transform(v4l2Formats.begin(), v4l2Formats.end(),
+                  std::inserter(deviceFormats, deviceFormats.begin()),
+                  [&](const decltype(v4l2Formats)::value_type &format) {
+                          return decltype(deviceFormats)::value_type{
+                                  format.first.toPixelFormat(),
+                                  format.second
+                          };
+                  });
+
+The `StreamFormats`_ class holds information about the pixel formats and frame
+sizes that a stream can support. The class groups size information by the pixel
+format that can produce it.
+
+.. _StreamFormats: https://libcamera.org/api-html/classlibcamera_1_1StreamFormats.html
+
+The code below uses the ``StreamFormats`` class to represent all of the
+supported pixel formats, associated with a list of frame sizes. It then
+generates a supported StreamConfiguration to model the information an
+application can use to configure a single stream.
+
+Continue adding the following code to support this:
+
+.. code-block:: cpp
+
+   StreamFormats formats(deviceFormats);
+   StreamConfiguration cfg(formats);
+
+As well as a list of supported StreamFormats, the StreamConfiguration is also
+expected to provide an initialised default configuration. This may be
+arbitrary, but depending on the use case you may wish to select an output that
+matches the sensor output, or prefer a pixel format which might provide higher
+performance on the hardware. The bufferCount represents the number of buffers
+required to support functional continuous processing on this stream.
+
+.. code-block:: cpp
+
+   cfg.pixelFormat = formats::BGR888;
+   cfg.size = { 1280, 720 };
+   cfg.bufferCount = 4;
+
+Finally add each ``StreamConfiguration`` generated to the
+``CameraConfiguration``, and ensure that it has been validated before returning
+it to the application. With only a single supported stream, this code adds only
+a single StreamConfiguration. However, a StreamConfiguration should be added
+for each supported role on a device that can handle more streams.
+
+Add the following code to complete the implementation of
+``generateConfiguration``:
+
+.. code-block:: cpp
+
+   config->addConfiguration(cfg);
+
+   config->validate();
+
+   return config;
+
+To validate a camera configuration, a pipeline handler must implement the
+`CameraConfiguration::validate()`_ function in its derived class to inspect all
+the stream configurations associated with it, make any adjustments required to
+make the configuration valid, and return the validation status.
+
+If changes are made, it marks the configuration as ``Adjusted``. If the
+requested configuration is not supported and cannot be adjusted, it shall be
+refused and marked as ``Invalid``.
+
+.. _CameraConfiguration::validate(): https://libcamera.org/api-html/classlibcamera_1_1CameraConfiguration.html#a29f8f263384c6149775b6011c7397093
+
+The validation phase makes sure all the platform-specific constraints are
+respected by the requested configuration. The most trivial examples are making
+sure the requested image formats are supported and that the image alignment
+restrictions are adhered to. The pipeline handler specific implementation of
+``validate()`` shall inspect all the configuration parameters received and
+never assume they are correct, as applications are free to change the requested
+stream parameters after the configuration has been generated.
+
+Again, this example pipeline handler is kept simple; look at the more complex
+implementations for a realistic example.
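+
+From the application side, the contract that ``validate()`` implements looks
+like the following sketch. This is an illustration only, not part of the
+guide's vivid implementation; it assumes the application already holds a
+``Camera`` pointer:
+
+.. code-block:: cpp
+
+   std::unique_ptr<CameraConfiguration> config =
+           camera->generateConfiguration({ StreamRole::Viewfinder });
+
+   StreamConfiguration &cfg = config->at(0);
+   cfg.size = { 4096, 2160 }; /* May not be supported by the device. */
+
+   switch (config->validate()) {
+   case CameraConfiguration::Valid:
+           break; /* Parameters will be used exactly as requested. */
+   case CameraConfiguration::Adjusted:
+           /* cfg now holds the closest supported parameters. */
+           break;
+   case CameraConfiguration::Invalid:
+           /* The configuration cannot be adjusted to anything usable. */
+           return;
+   }
+
+   camera->configure(config.get());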
+
+Add the following function implementation to your file:
+
+.. code-block:: cpp
+
+   CameraConfiguration::Status VividCameraConfiguration::validate()
+   {
+           Status status = Valid;
+
+           if (config_.empty())
+                   return Invalid;
+
+           if (config_.size() > 1) {
+                   config_.resize(1);
+                   status = Adjusted;
+           }
+
+           StreamConfiguration &cfg = config_[0];
+
+           const std::vector<PixelFormat> formats = cfg.formats().pixelformats();
+           if (std::find(formats.begin(), formats.end(), cfg.pixelFormat) == formats.end()) {
+                   cfg.pixelFormat = cfg.formats().pixelformats()[0];
+                   LOG(VIVID, Debug) << "Adjusting format to " << cfg.pixelFormat.toString();
+                   status = Adjusted;
+           }
+
+           cfg.bufferCount = 4;
+
+           return status;
+   }
+
+Now that we are handling the ``PixelFormat`` type, we also need to add
+``#include <libcamera/formats.h>`` to the include section before we rebuild the
+codebase, and test:
+
+.. code-block:: shell
+
+   ninja -C build
+   LIBCAMERA_LOG_LEVELS=Pipeline,VIVID:0 ./build/src/cam/cam -c vivid -I
+
+You should see the following output showing the capabilities of our new
+pipeline handler, and showing that our configurations have been generated:
+
+.. code-block:: shell
+
+   Using camera vivid
+   0: 1280x720-BGR888
+    * Pixelformat: NV21 (320x180)-(3840x2160)/(+0,+0)
+     - 320x180
+     - 640x360
+     - 640x480
+     - 1280x720
+     - 1920x1080
+     - 3840x2160
+    * Pixelformat: NV12 (320x180)-(3840x2160)/(+0,+0)
+     - 320x180
+     - 640x360
+     - 640x480
+     - 1280x720
+     - 1920x1080
+     - 3840x2160
+    * Pixelformat: BGRA8888 (320x180)-(3840x2160)/(+0,+0)
+     - 320x180
+     - 640x360
+     - 640x480
+     - 1280x720
+     - 1920x1080
+     - 3840x2160
+    * Pixelformat: RGBA8888 (320x180)-(3840x2160)/(+0,+0)
+     - 320x180
+     - 640x360
+     - 640x480
+     - 1280x720
+     - 1920x1080
+     - 3840x2160
+
+Configuring a device
+~~~~~~~~~~~~~~~~~~~~
+
+With the configuration generated, and optionally modified and re-validated, a
+pipeline handler needs a function that allows an application to apply a
+configuration to the hardware devices.
+
+The `PipelineHandler::configure()`_ function receives a valid
+`CameraConfiguration`_ and applies the settings to hardware devices, using its
+parameters to prepare a device for a streaming session with the desired
+properties.
+
+.. _PipelineHandler::configure(): https://libcamera.org/api-html/classlibcamera_1_1PipelineHandler.html#a930f2a9cdfb51dfb4b9ca3824e84fc29
+.. _CameraConfiguration: https://libcamera.org/api-html/classlibcamera_1_1CameraConfiguration.html
+
+Replace the contents of the stubbed ``PipelineHandlerVivid::configure`` function
+with the following to obtain the camera data and stream configuration. This
+pipeline handler supports only a single stream, so it directly obtains the
+first ``StreamConfiguration`` from the camera configuration. A pipeline handler
+with multiple streams should inspect each StreamConfiguration and configure the
+system accordingly.
+
+.. code-block:: cpp
+
+   VividCameraData *data = cameraData(camera);
+   StreamConfiguration &cfg = config->at(0);
+   int ret;
+
+The Vivid capture device is a V4L2 video device, so we use a `V4L2DeviceFormat`_
+with the fourcc and size attributes to apply directly to the capture device
+node. The fourcc attribute is a `V4L2PixelFormat`_ and differs from the
+``libcamera::PixelFormat``. Converting the format requires knowledge of the
+plane configuration for multiplanar formats, so you must explicitly convert it
+using the helper ``V4L2VideoDevice::toV4L2PixelFormat()`` provided by the
+V4L2VideoDevice instance that the format will be applied on.
+.. _V4L2DeviceFormat: https://libcamera.org/api-html/classlibcamera_1_1V4L2DeviceFormat.html
+.. _V4L2PixelFormat: https://libcamera.org/api-html/classlibcamera_1_1V4L2PixelFormat.html
+
+Add the following code beneath the code from above:
+
+.. code-block:: cpp
+
+   V4L2DeviceFormat format = {};
+   format.fourcc = data->video_->toV4L2PixelFormat(cfg.pixelFormat);
+   format.size = cfg.size;
+
+Set the video device format defined above using the
+`V4L2VideoDevice::setFormat()`_ function. You should check if the kernel driver
+has adjusted the format, as this shows the pipeline handler has failed to
+handle the validation stages correctly, and the configure operation must also
+fail.
+
+.. _V4L2VideoDevice::setFormat(): https://libcamera.org/api-html/classlibcamera_1_1V4L2VideoDevice.html#ad67b47dd9327ce5df43350b80c083cca
+
+Continue the implementation with the following code:
+
+.. code-block:: cpp
+
+   ret = data->video_->setFormat(&format);
+   if (ret)
+           return ret;
+
+   if (format.size != cfg.size ||
+       format.fourcc != data->video_->toV4L2PixelFormat(cfg.pixelFormat))
+           return -EINVAL;
+
+Finally, store and set stream-specific data reflecting the state of the stream.
+Associate the configuration with the stream by using the
+`StreamConfiguration::setStream`_ function, and set the values of individual
+stream configuration members as required.
+
+.. _StreamConfiguration::setStream: https://libcamera.org/api-html/structlibcamera_1_1StreamConfiguration.html#a74a0eb44dad1b00112c7c0443ae54a12
+
+.. NOTE: the cfg.setStream() call here associates the stream to the
+   StreamConfiguration however that should quite likely be done as part of
+   the validation process. TBD
+
+Complete the configure implementation with the following code:
+
+.. code-block:: cpp
+
+   cfg.setStream(&data->stream_);
+   cfg.stride = format.planes[0].bpl;
+
+   return 0;
+
+.. TODO: stride SHALL be assigned in validate
+
+Initializing device controls
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Pipeline handlers can optionally initialize the video devices and camera sensor
+controls at system configuration time, to make sure they are defaulted to sane
+values. Handling of device controls is again performed using the libcamera
+`controls framework`_.
+
+.. _Controls Framework: https://libcamera.org/api-html/controls_8h.html
+
+This section is particularly specific to Vivid as it sets the initial values of
+controls to match `Vivid Controls`_ defined by the kernel driver. You won't need
+any of the code below for your pipeline handler, but it's included as an example
+of how to implement functionality your pipeline handler might need.
+
+.. _Vivid Controls: https://www.kernel.org/doc/html/latest/admin-guide/media/vivid.html#controls
+
+We need to add some definitions at the top of the file for convenience. These
+come directly from the kernel sources:
+
+.. code-block:: cpp
+
+   #define VIVID_CID_VIVID_BASE    (0x00f00000 | 0xf000)
+   #define VIVID_CID_VIVID_CLASS   (0x00f00000 | 1)
+   #define VIVID_CID_TEST_PATTERN  (VIVID_CID_VIVID_BASE + 0)
+   #define VIVID_CID_OSD_TEXT_MODE (VIVID_CID_VIVID_BASE + 1)
+   #define VIVID_CID_HOR_MOVEMENT  (VIVID_CID_VIVID_BASE + 2)
+
+We can now use the V4L2 control IDs to prepare a list of controls with the
+`ControlList`_ class, and set them using the `ControlList::set()`_ function.
+
+.. _ControlList: https://libcamera.org/api-html/classlibcamera_1_1ControlList.html
+.. _ControlList::set(): https://libcamera.org/api-html/classlibcamera_1_1ControlList.html#a74a1a29abff5243e6e37ace8e24eb4ba
+
+In our pipeline ``configure`` function, add the following code after the format
+has been set and checked to initialise the ControlList and apply it to the
+device:
+
+.. code-block:: cpp
+
+   ControlList controls(data->video_->controls());
+   controls.set(VIVID_CID_TEST_PATTERN, 0);
+   controls.set(VIVID_CID_OSD_TEXT_MODE, 0);
+
+   controls.set(V4L2_CID_BRIGHTNESS, 128);
+   controls.set(V4L2_CID_CONTRAST, 128);
+   controls.set(V4L2_CID_SATURATION, 128);
+
+   controls.set(VIVID_CID_HOR_MOVEMENT, 5);
+
+   ret = data->video_->setControls(&controls);
+   if (ret) {
+           LOG(VIVID, Error) << "Failed to set controls: " << ret;
+           return ret < 0 ? ret : -EINVAL;
+   }
+
+These controls configure VIVID to use a default test pattern and enable all
+on-screen display text, while configuring sensible brightness, contrast and
+saturation values. Use the ``controls.set`` function to set individual controls.
+
+Buffer handling and stream control
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Once the system has been configured with the requested parameters, it is
+possible for applications to start capturing frames from the ``Camera`` device.
+
+libcamera implements a per-frame request capture model, realized by queueing
+``Request`` instances to a ``Camera`` object. Before applications can start
+submitting capture requests, the capture pipeline needs to be prepared to
+deliver frames as soon as they are requested. Memory should be initialized and
+made available to the devices, which have to be started and ready to produce
+images. At the end of a capture session the ``Camera`` device needs to be
+stopped, to gracefully clean up any allocated memory and stop the hardware
+devices. Pipeline handlers implement two functions for these purposes, the
+``start()`` and ``stop()`` functions.
+
+The memory initialization phase that happens at ``start()`` time serves to
+configure video devices to be able to use memory buffers exported as dma-buf
+file descriptors. From the pipeline handler's perspective the video devices
+that provide application-facing streams always act as memory importers which
+use, in V4L2 terminology, buffers of V4L2_MEMORY_DMABUF memory type.
+
+libcamera also provides an API to allocate and export memory to applications,
+realized through the `exportFrameBuffers`_ function and the
+`FrameBufferAllocator`_ class, which will be presented later.
+
+.. _exportFrameBuffers: https://libcamera.org/api-html/classlibcamera_1_1PipelineHandler.html#a6312a69da7129c2ed41f9d9f790adf7c
+.. _FrameBufferAllocator: https://libcamera.org/api-html/classlibcamera_1_1FrameBufferAllocator.html
+
+Please refer to the V4L2VideoDevice API documentation, specifically the
+`allocateBuffers`_, `importBuffers`_ and `exportBuffers`_ functions for a
+detailed description of the video device memory management.
+
+.. _allocateBuffers: https://libcamera.org/api-html/classlibcamera_1_1V4L2VideoDevice.html#a3a1a77e5e6c220ea7878e89485864a1c
+.. _importBuffers: https://libcamera.org/api-html/classlibcamera_1_1V4L2VideoDevice.html#a154f5283d16ebd5e15d63e212745cb64
+.. _exportBuffers: https://libcamera.org/api-html/classlibcamera_1_1V4L2VideoDevice.html#ae9c0b0a68f350725b63b73a6da5a2ecd
+
+Video memory buffers are represented in libcamera by the `FrameBuffer`_ class.
+A ``FrameBuffer`` instance has to be associated with each ``Stream`` which is
+part of a capture ``Request``.
+Pipeline handlers should prepare the capture devices by importing the dma-buf
+file descriptors they need to operate on. This operation is performed by using
+the ``V4L2VideoDevice`` API, which provides an ``importBuffers()`` function
+that prepares the video device accordingly.
+
+.. _FrameBuffer: https://libcamera.org/api-html/classlibcamera_1_1FrameBuffer.html
+
+Implement the pipeline handler ``start()`` function by replacing the stub
+version with the following code:
+
+.. code-block:: cpp
+
+   VividCameraData *data = cameraData(camera);
+   unsigned int count = data->stream_.configuration().bufferCount;
+
+   int ret = data->video_->importBuffers(count);
+   if (ret < 0)
+           return ret;
+
+   return 0;
+
+During the startup phase pipeline handlers allocate any internal buffer pool
+required to transfer data between different components of the image capture
+pipeline, for example, between the CSI-2 receiver and the ISP input. The example
+pipeline does not require any internal pool, but examples are available in more
+complex pipeline handlers in the libcamera code base.
+
+Applications might want to use memory allocated in the video devices instead of
+allocating it from other parts of the system. libcamera provides an abstraction
+to assist with this task in the `FrameBufferAllocator`_ class. The
+``FrameBufferAllocator`` reserves memory for a ``Stream`` in the video device
+and exports it as dma-buf file descriptors. From this point on, the allocated
+``FrameBuffer`` instances are associated with ``Stream`` instances in a
+``Request`` and then imported by the pipeline handler in exactly the same
+fashion as if they were allocated elsewhere.
+
+.. _FrameBufferAllocator: https://libcamera.org/api-html/classlibcamera_1_1FrameBufferAllocator.html
+
+Pipeline handlers support the ``FrameBufferAllocator`` operations by
+implementing the `exportFrameBuffers`_ function, which will allocate memory in
+the video device associated with a stream and export it.
+
+.. _exportFrameBuffers: https://libcamera.org/api-html/classlibcamera_1_1PipelineHandler.html#a6312a69da7129c2ed41f9d9f790adf7c
+
+Implement the ``exportFrameBuffers`` stub function with the following code to
+handle this:
+
+.. code-block:: cpp
+
+   unsigned int count = stream->configuration().bufferCount;
+   VividCameraData *data = cameraData(camera);
+
+   return data->video_->exportBuffers(count, buffers);
+
+Once memory has been properly set up, the video devices can be started, to
+prepare for capture operations. Complete the ``start`` function implementation
+with the following code:
+
+.. code-block:: cpp
+
+   ret = data->video_->streamOn();
+   if (ret < 0) {
+           data->video_->releaseBuffers();
+           return ret;
+   }
+
+   return 0;
+
+The function starts the video device associated with the stream with the
+`streamOn`_ function. If the call fails, the error value is propagated to the
+caller and the `releaseBuffers`_ function releases any buffers to leave the
+device in a consistent state. If your pipeline handler uses any image
+processing algorithms or other devices, you should also stop them.
+
+.. _streamOn: https://libcamera.org/api-html/classlibcamera_1_1V4L2VideoDevice.html#a588a5dc9d6f4c54c61136ac43ff9a8cc
+.. _releaseBuffers: https://libcamera.org/api-html/classlibcamera_1_1V4L2VideoDevice.html#a191619c152f764e03bc461611f3fcd35
+
+Of course we also need to handle the corresponding actions to stop streaming on
+a device. Add the following to the ``stop`` function to stop the stream with
+the `streamOff`_ function and release all buffers.
+.. _streamOff: https://libcamera.org/api-html/classlibcamera_1_1V4L2VideoDevice.html#a61998710615bdf7aa25a046c8565ed66
+
+.. code-block:: cpp
+
+   VividCameraData *data = cameraData(camera);
+   data->video_->streamOff();
+   data->video_->releaseBuffers();
+
+Queuing requests between applications and hardware
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+libcamera implements a streaming model based on capture requests queued by an
+application to the ``Camera`` device. Each request contains at least one
+``Stream`` instance with an associated ``FrameBuffer`` object.
+
+When an application sends a capture request, the pipeline handler identifies
+which video devices have to be provided with buffers to generate a frame from
+the enabled streams.
+
+This example pipeline handler identifies the buffer using the `findBuffer`_
+helper from the only supported stream and queues it to the capture device
+directly with the `queueBuffer`_ function provided by the V4L2VideoDevice.
+
+.. _findBuffer: https://libcamera.org/api-html/classlibcamera_1_1Request.html#ac66050aeb9b92c64218945158559c4d4
+.. _queueBuffer: https://libcamera.org/api-html/classlibcamera_1_1V4L2VideoDevice.html#a594cd594686a8c1cf9ae8dba0b2a8a75
+
+Replace the stubbed contents of ``queueRequestDevice`` with the following:
+
+.. code-block:: cpp
+
+   VividCameraData *data = cameraData(camera);
+   FrameBuffer *buffer = request->findBuffer(&data->stream_);
+   if (!buffer) {
+           LOG(VIVID, Error)
+                   << "Attempt to queue request with invalid stream";
+
+           return -ENOENT;
+   }
+
+   int ret = data->video_->queueBuffer(buffer);
+   if (ret < 0)
+           return ret;
+
+   return 0;
+
+Processing controls
+~~~~~~~~~~~~~~~~~~~
+
+Capture requests not only contain streams and memory buffers, but can
+optionally contain a list of controls the application has set to modify the
+streaming parameters.
+
+Applications can set controls registered by the pipeline handler in the
+initialization phase, as explained in the `Registering controls and properties`_
+section.
+
+Implement a ``processControls`` function above the ``queueRequestDevice``
+function to loop through the control list received with each request, and
+inspect the control values. Controls may need to be converted between the
+libcamera control range definitions and their corresponding values on the
+device before being set.
+
+.. code-block:: cpp
+
+   int PipelineHandlerVivid::processControls(VividCameraData *data, Request *request)
+   {
+           ControlList controls(data->video_->controls());
+
+           for (auto it : request->controls()) {
+                   unsigned int id = it.first;
+                   unsigned int offset;
+                   uint32_t cid;
+
+                   if (id == controls::Brightness) {
+                           cid = V4L2_CID_BRIGHTNESS;
+                           offset = 128;
+                   } else if (id == controls::Contrast) {
+                           cid = V4L2_CID_CONTRAST;
+                           offset = 0;
+                   } else if (id == controls::Saturation) {
+                           cid = V4L2_CID_SATURATION;
+                           offset = 0;
+                   } else {
+                           continue;
+                   }
+
+                   int32_t value = lroundf(it.second.get<float>() * 128 + offset);
+                   controls.set(cid, std::clamp(value, 0, 255));
+           }
+
+           for (const auto &ctrl : controls)
+                   LOG(VIVID, Debug)
+                           << "Setting control " << utils::hex(ctrl.first)
+                           << " to " << ctrl.second.toString();
+
+           int ret = data->video_->setControls(&controls);
+           if (ret) {
+                   LOG(VIVID, Error) << "Failed to set controls: " << ret;
+                   return ret < 0 ? ret : -EINVAL;
+           }
+
+           return ret;
+   }
+
+Declare the function prototype for the ``processControls`` function within the
+private ``PipelineHandlerVivid`` class members, as it is only used internally
+as a helper when processing Requests.
+.. code-block:: cpp
+
+   private:
+           int processControls(VividCameraData *data, Request *request);
+
+A pipeline handler is responsible for applying controls provided in a Request
+to the relevant hardware devices. This could be directly on the capture device,
+or where appropriate by setting controls on V4L2Subdevices directly. Each
+pipeline handler is responsible for understanding the correct procedure for
+applying controls to the devices it supports.
+
+This example pipeline handler applies controls during the `queueRequestDevice`_
+function for each request, and applies them to the capture device through the
+capture node.
+
+.. _queueRequestDevice: https://libcamera.org/api-html/classlibcamera_1_1PipelineHandler.html#a106914cca210640c9da9ee1f0419e83c
+
+In the ``queueRequestDevice`` function, replace the following:
+
+.. code-block:: cpp
+
+   int ret = data->video_->queueBuffer(buffer);
+   if (ret < 0)
+           return ret;
+
+With the following code:
+
+.. code-block:: cpp
+
+   int ret = processControls(data, request);
+   if (ret < 0)
+           return ret;
+
+   ret = data->video_->queueBuffer(buffer);
+   if (ret < 0)
+           return ret;
+
+We also need to add the following include directive to support the control
+value translation operations:
+
+.. code-block:: cpp
+
+   #include <cmath>
+
+Frame completion and event handling
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+libcamera implements a signals and slots mechanism (similar to `Qt Signals and
+Slots`_) to connect event sources with callbacks to handle them.
+
+As a general summary, a ``Slot`` can be connected to a ``Signal``, which when
+emitted triggers the execution of the connected slots. A detailed description
+of the libcamera implementation is available in the `libcamera Signal and
+Slot`_ classes documentation.
+
+.. _Qt Signals and Slots: https://doc.qt.io/qt-6/signalsandslots.html
+.. _libcamera Signal and Slot: https://libcamera.org/api-html/classlibcamera_1_1Signal.html#details
+
+In order to notify applications about the availability of new frames and data,
+the ``Camera`` device exposes two ``Signals`` to which applications can connect
+to be notified of frame completion events. The ``bufferComplete`` signal serves
+to report to applications the completion event of a single ``Stream`` part of a
+``Request``, while the ``requestComplete`` signal notifies the completion of
+all the ``Streams`` and data submitted as part of a request. This mechanism
+allows implementation of partial request completion, which allows an
+application to inspect completed buffers associated with the single streams
+without waiting for all of them to be ready.
+
+The ``bufferComplete`` and ``requestComplete`` signals are emitted by the
+``Camera`` device upon notifications received from the pipeline handler, which
+tracks the buffers and request completion status.
+
+The single buffer completion notification is implemented by pipeline handlers
+by `connecting`_ the ``bufferReady`` signal of the capture devices they have
+queued buffers to, to a member function slot that handles processing of the
+completed frames. When a buffer is ready, the pipeline handler must propagate
+the completion of that buffer to the Camera by using the PipelineHandler base
+class ``completeBuffer`` function. When all of the buffers referenced by a
+``Request`` have been completed, the pipeline handler must again notify the
+``Camera`` using the PipelineHandler base class ``completeRequest`` function.
+The PipelineHandler class implementation makes sure the request completion
+notifications are delivered to applications in the same order as they have
+been submitted.
+
+.. _connecting: https://libcamera.org/api-html/classlibcamera_1_1Signal.html#aa04db72d5b3091ffbb4920565aeed382
+
+Returning to the ``int VividCameraData::init()`` function, add the following
+above the closing ``return 0;`` to connect the pipeline handler ``bufferReady``
+function to the V4L2 device buffer signal.
+
+.. code-block:: cpp
+
+   video_->bufferReady.connect(this, &VividCameraData::bufferReady);
+
+Create the matching ``VividCameraData::bufferReady`` function after your
+``VividCameraData::init()`` implementation.
+
+The ``bufferReady`` function obtains the request from the buffer using the
+``request`` function, and notifies the ``Camera`` that the buffer and request
+are completed. In this simple pipeline handler, there is only one stream, so it
+completes the request immediately. You can find a more complex example of event
+handling that supports multiple streams in the libcamera code base.
+
+.. TODO: Add link
+
+.. code-block:: cpp
+
+   void VividCameraData::bufferReady(FrameBuffer *buffer)
+   {
+           Request *request = buffer->request();
+
+           pipe_->completeBuffer(request, buffer);
+           pipe_->completeRequest(request);
+   }
+
+Testing a pipeline handler
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Once you've built the pipeline handler, rebuild the code base and test capture
+through the pipeline with both the cam and qcam utilities:
+
+.. code-block:: shell
+
+   ninja -C build
+   ./build/src/cam/cam -c vivid -C5
+
+This verifies that the pipeline handler can detect a device and capture input.
+
+Running the command above outputs (a lot of) information about pixel formats,
+and then starts capturing frame data, and should provide an output such as the
+following:
+
+.. code-block:: none
+
+   user@dev:/home/libcamera$ ./build/src/cam/cam -c vivid -C5
+   [42:34:08.573066847] [186470] INFO IPAManager ipa_manager.cpp:136 libcamera is not installed. Adding '/home/libcamera/build/src/ipa' to the IPA search path
+   [42:34:08.575908115] [186470] INFO Camera camera_manager.cpp:287 libcamera v0.0.11+876-7b27d262
+   [42:34:08.610334268] [186471] INFO IPAProxy ipa_proxy.cpp:122 libcamera is not installed. Loading IPA configuration from '/home/libcamera/src/ipa/vimc/data'
+   Using camera vivid
+   [42:34:08.618462130] [186470] WARN V4L2 v4l2_pixelformat.cpp:176 Unsupported V4L2 pixel format Y10
+   ...
+   [42:34:08.619901297] [186470] INFO Camera camera.cpp:793 configuring streams: (0) 1280x720-BGR888
+   Capture 5 frames
+   fps: 0.00 stream0 seq: 000000 bytesused: 2764800
+   fps: 4.98 stream0 seq: 000001 bytesused: 2764800
+   fps: 5.00 stream0 seq: 000002 bytesused: 2764800
+   fps: 5.03 stream0 seq: 000003 bytesused: 2764800
+   fps: 5.03 stream0 seq: 000004 bytesused: 2764800
+
+This demonstrates that the pipeline handler is successfully capturing frames,
+but it is helpful to see the visual output and validate the images are being
+processed correctly. The libcamera project also implements a Qt based
+application which will render the frames in a window for visual inspection:
+
+.. code-block:: shell
+
+   ./build/src/qcam/qcam -c vivid
+
+.. TODO: Running qcam with the vivid pipeline handler appears to have a bug and
+   no visual frames are seen. However disabling zero-copy on qcam renders
+   them successfully.
diff --git a/spider-cam/libcamera/Documentation/guides/tracing.rst b/spider-cam/libcamera/Documentation/guides/tracing.rst
new file mode 100644
index 0000000..ae960d8
--- /dev/null
+++ b/spider-cam/libcamera/Documentation/guides/tracing.rst
@@ -0,0 +1,147 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+Tracing Guide
+=============
+
+Guide to tracing in libcamera.
+
+Profiling vs Tracing
+--------------------
+
+Tracing records timestamps at specific code locations. libcamera provides a
+tracing facility; this guide shows how to use it.
+
+Tracing should not be confused with profiling, which samples execution
+at periodic points in time. Profiling can be done with other tools such as
+callgrind, perf, gprof, etc., without modification to the application, and is
+out of scope for this guide.
+
+Compiling
+---------
+
+To compile libcamera with tracing support, it must be enabled through the
+meson ``tracing`` option. It depends on the lttng-ust library (available in
+the ``liblttng-ust-dev`` package for Debian-based distributions).
+By default the tracing option in meson is set to ``auto``, so if liblttng is
+detected, tracing will be enabled by default. Conversely, if the option is
+set to ``disabled``, then libcamera will be compiled without tracing support.
+
+Defining tracepoints
+--------------------
+
+libcamera already contains a set of tracepoints. To define additional
+tracepoints, create a file
+``include/libcamera/internal/tracepoints/{file}.tp``, where ``file`` is a
+reasonable name related to the category of tracepoints that you wish to
+define. For example, the tracepoints file for the Request object is called
+``request.tp``. An entry for this file must be added in
+``include/libcamera/internal/tracepoints/meson.build``.
+
+In this tracepoints file, define your tracepoints as mandated by lttng. The
+header boilerplate must *not* be included (as it will conflict with the rest
+of our infrastructure); only the tracepoint definitions (with the
+``TRACEPOINT_*`` macros) should be included.
+
+All tracepoint providers shall be ``libcamera``. According to lttng, the
+tracepoint provider should be per-project; this is the rationale for this
+decision. To group tracepoint events, we recommend using
+``{class_name}_{tracepoint_name}``, for example, ``request_construct`` for a
+tracepoint for the constructor of the Request class.
+
+Tracepoint arguments may take C++ object pointers, in which case the usual
+C++ namespacing rules apply. The header that contains the necessary class
+definitions must be included at the top of the tracepoint provider file.
+
+Note: the final parameter in ``TP_ARGS`` *must not* have a trailing comma, and
+the parameters to ``TP_FIELDS`` are *space-separated*. Not following these
+rules will cause compilation errors.
+
+Using tracepoints (in libcamera)
+--------------------------------
+
+To use tracepoints in libcamera, first the header needs to be included:
+
+``#include "libcamera/internal/tracepoints.h"``
+
+Then to use the tracepoint:
+
+``LIBCAMERA_TRACEPOINT({tracepoint_event}, args...)``
+
+This macro must be used, as opposed to lttng's macros directly, because
+lttng is an optional dependency of libcamera, so the code must compile and run
+even when lttng is not present or when tracing is disabled.
+
+The tracepoint provider name, as declared in the tracepoint definition, is not
+included in the parameters of the tracepoint.
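+
+For illustration, a hypothetical tracepoint definition and its call site could
+look as follows. The ``buffer_queue`` event and its field are invented for
+this sketch and are not actual libcamera tracepoints:
+
+.. code-block:: cpp
+
+   /* include/libcamera/internal/tracepoints/buffer.tp (hypothetical) */
+   #include <libcamera/framebuffer.h>
+
+   TRACEPOINT_EVENT(
+           libcamera,
+           buffer_queue,
+           TP_ARGS(
+                   libcamera::FrameBuffer *, buffer
+           ),
+           TP_FIELDS(
+                   ctf_integer_hex(uintptr_t, buffer_ptr,
+                                   reinterpret_cast<uintptr_t>(buffer))
+           )
+   )
+
+   /* At the call site, in libcamera code: */
+   #include "libcamera/internal/tracepoints.h"
+
+   LIBCAMERA_TRACEPOINT(buffer_queue, buffer);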
+
+There are also two special tracepoints available for tracing IPA calls:
+
+``LIBCAMERA_TRACEPOINT_IPA_BEGIN({pipeline_name}, {ipa_function})``
+
+``LIBCAMERA_TRACEPOINT_IPA_END({pipeline_name}, {ipa_function})``
+
+These shall be placed where an IPA function is called from the pipeline
+handler, and where the pipeline handler receives the corresponding response
+from the IPA, respectively. These are the tracepoints that our sample analysis
+script (see "Analyzing a trace") scans for when computing statistics on IPA
+call time.
+
+Using tracepoints (from an application)
+---------------------------------------
+
+As applications are not part of libcamera, but rather users of libcamera,
+applications should seek their own tracing mechanisms. For ease of tracing
+the application alongside tracing libcamera, it is recommended to also use
+lttng.
+
+Using tracepoints (from closed-source IPA)
+------------------------------------------
+
+Similar to applications, closed-source IPAs can simply use lttng on their own,
+or any other tracing mechanism if desired.
+
+Collecting a trace
+------------------
+
+A trace can be collected fairly simply from lttng:
+
+.. code-block:: bash
+
+   lttng create $SESSION_NAME
+   lttng enable-event -u libcamera:\*
+   lttng start
+   # run libcamera application
+   lttng stop
+   lttng view
+   lttng destroy $SESSION_NAME
+
+See the lttng documentation for further details.
+
+The location of the trace file is printed when running
+``lttng create $SESSION_NAME``. After destroying the session, it can still be
+viewed by: ``lttng view -t $PATH_TO_TRACE``, where ``$PATH_TO_TRACE`` is the
+path that was printed when the session was created. This is the same path that
+is used when analyzing traces programmatically, as described in the next
+section.
+
+Analyzing a trace
+-----------------
+
+As mentioned above, while an lttng tracing session exists and the trace is not
+running, the trace output can be viewed as text by ``lttng view``.
+
+The trace log can also be viewed as text using babeltrace2. See the lttng
+trace analysis documentation for further details.
+
+babeltrace2 also has a C API and python bindings that can be used to process
+traces. See the lttng python bindings documentation and the lttng C API
+documentation for more details.
+
+As an example, there is a script ``utils/tracepoints/analyze-ipa-trace.py``
+that gathers statistics for the time taken for an IPA function call, by
+measuring the time difference between pairs of events
+``libcamera:ipa_call_start`` and ``libcamera:ipa_call_finish``.
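+
+For reference, the event pair that this script consumes is emitted by the IPA
+tracepoint macros described earlier. A hypothetical pipeline handler wrapping
+a synchronous IPA call (the ``vivid`` pipeline name and ``processStats``
+function are invented for this sketch) would emit them like this:
+
+.. code-block:: cpp
+
+   #include "libcamera/internal/tracepoints.h"
+
+   LIBCAMERA_TRACEPOINT_IPA_BEGIN(vivid, processStats);
+   ipa_->processStats(frame, stats); /* hypothetical IPA interface call */
+   LIBCAMERA_TRACEPOINT_IPA_END(vivid, processStats);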
diff --git a/spider-cam/libcamera/Documentation/images/rotation/rotate0.svg b/spider-cam/libcamera/Documentation/images/rotation/rotate0.svg
new file mode 100644
index 0000000..13cde16
--- /dev/null
+++ b/spider-cam/libcamera/Documentation/images/rotation/rotate0.svg
@@ -0,0 +1,132 @@
+[SVG markup not reproduced: rotation diagram, 0 degrees]
diff --git a/spider-cam/libcamera/Documentation/images/rotation/rotate0Mirror.svg b/spider-cam/libcamera/Documentation/images/rotation/rotate0Mirror.svg
new file mode 100644
index 0000000..a7edda8
--- /dev/null
+++ b/spider-cam/libcamera/Documentation/images/rotation/rotate0Mirror.svg
@@ -0,0 +1,135 @@
+[SVG markup not reproduced: rotation diagram, 0 degrees mirrored]
diff --git a/spider-cam/libcamera/Documentation/images/rotation/rotate180.svg b/spider-cam/libcamera/Documentation/images/rotation/rotate180.svg
new file mode 100644
index 0000000..d092a53
--- /dev/null
+++ b/spider-cam/libcamera/Documentation/images/rotation/rotate180.svg
@@ -0,0 +1,135 @@
+[SVG markup not reproduced: rotation diagram, 180 degrees]
diff --git a/spider-cam/libcamera/Documentation/images/rotation/rotate180Mirror.svg b/spider-cam/libcamera/Documentation/images/rotation/rotate180Mirror.svg
new file mode 100644
index 0000000..d4a77d5
--- /dev/null
+++ b/spider-cam/libcamera/Documentation/images/rotation/rotate180Mirror.svg
@@ -0,0 +1,135 @@
+[SVG markup not reproduced: rotation diagram, 180 degrees mirrored]
diff --git a/spider-cam/libcamera/Documentation/images/rotation/rotate270.svg b/spider-cam/libcamera/Documentation/images/rotation/rotate270.svg
new file mode 100644
index 0000000..13ea1e5
--- /dev/null
+++ b/spider-cam/libcamera/Documentation/images/rotation/rotate270.svg
@@ -0,0 +1,135 @@
+[SVG markup not reproduced: rotation diagram, 270 degrees]
diff --git a/spider-cam/libcamera/Documentation/images/rotation/rotate270Mirror.svg b/spider-cam/libcamera/Documentation/images/rotation/rotate270Mirror.svg
new file mode 100644
index 0000000..6116f50
--- /dev/null
+++ b/spider-cam/libcamera/Documentation/images/rotation/rotate270Mirror.svg
@@ -0,0 +1,135 @@
+[SVG markup not reproduced: rotation diagram, 270 degrees mirrored]
diff --git a/spider-cam/libcamera/Documentation/images/rotation/rotate90.svg b/spider-cam/libcamera/Documentation/images/rotation/rotate90.svg
new file mode 100644
index 0000000..af62763
--- /dev/null
+++ b/spider-cam/libcamera/Documentation/images/rotation/rotate90.svg
@@ -0,0 +1,135 @@
+[SVG markup not reproduced: rotation diagram, 90 degrees]
diff --git a/spider-cam/libcamera/Documentation/images/rotation/rotate90Mirror.svg b/spider-cam/libcamera/Documentation/images/rotation/rotate90Mirror.svg
new file mode 100644
index 0000000..1760c46
--- /dev/null
+++ b/spider-cam/libcamera/Documentation/images/rotation/rotate90Mirror.svg
@@ -0,0 +1,135 @@
+[SVG markup not reproduced: rotation diagram, 90 degrees mirrored]
diff --git a/spider-cam/libcamera/Documentation/index.rst b/spider-cam/libcamera/Documentation/index.rst
new file mode 100644
index 0000000..5442ae7
--- /dev/null
+++ b/spider-cam/libcamera/Documentation/index.rst
@@ -0,0 +1,27 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+.. Front page matter is defined in the project README file.
+.. include:: ../README.rst
+   :start-after: .. section-begin-libcamera
+   :end-before: .. section-end-libcamera
+
+.. toctree::
+   :maxdepth: 1
+   :caption: Contents:
+
+   Home <self>
+   Docs <docs>
+   Contribute <contributing>
+   Getting Started <getting-started>
+
+   Developer Guide <guides/introduction>
+   Application Writer's Guide <guides/application-developer>
+   Pipeline Handler Writer's Guide <guides/pipeline-handler>
+   IPA Writer's guide <guides/ipa>
+   Tracing guide <guides/tracing>
+   Environment variables <environment_variables>
+   Sensor driver requirements <sensor_driver_requirements>
+   Lens driver requirements <lens_driver_requirements>
+   Python Bindings <python-bindings>
+   Camera Sensor Model <camera-sensor-model>
+   SoftwareISP Benchmarking <software-isp-benchmarking>
diff --git a/spider-cam/libcamera/Documentation/lens_driver_requirements.rst b/spider-cam/libcamera/Documentation/lens_driver_requirements.rst
new file mode 100644
index 0000000..b96e502
--- /dev/null
+++ b/spider-cam/libcamera/Documentation/lens_driver_requirements.rst
@@ -0,0 +1,27 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+.. _lens-driver-requirements:
+
+Lens Driver Requirements
+========================
+
+libcamera handles lens devices in the CameraLens class and defines
+a consistent interface through its API towards other library components.
+
+The CameraLens class uses the V4L2 subdev kernel API to interface with the
+camera lens through a sub-device exposed to userspace by the lens driver.
+
+In order for libcamera to be fully operational and provide all the required
+information for interfacing with the camera lens to applications and pipeline
+handlers, a set of mandatory features the driver has to support has been
+defined.
+
+Mandatory Requirements
+----------------------
+
+The lens driver is assumed to be fully compliant with the V4L2 specification.
+
+The lens driver shall support the following V4L2 controls:
+
+* `V4L2_CID_FOCUS_ABSOLUTE`_
+
+.. _V4L2_CID_FOCUS_ABSOLUTE: https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/ext-ctrls-camera.html
diff --git a/spider-cam/libcamera/Documentation/meson.build b/spider-cam/libcamera/Documentation/meson.build
new file mode 100644
index 0000000..30d3952
--- /dev/null
+++ b/spider-cam/libcamera/Documentation/meson.build
@@ -0,0 +1,105 @@
+# SPDX-License-Identifier: CC0-1.0
+
+doc_install_dir = get_option('datadir') / 'doc' / 'libcamera-@0@'.format(libcamera_version)
+
+#
+# Doxygen
+#
+
+doxygen = find_program('doxygen', required : get_option('documentation'))
+dot = find_program('dot', required : get_option('documentation'))
+
+if doxygen.found() and dot.found()
+    cdata = configuration_data()
+    cdata.set('VERSION', 'v@0@'.format(libcamera_git_version))
+    cdata.set('TOP_SRCDIR', meson.project_source_root())
+    cdata.set('TOP_BUILDDIR', meson.project_build_root())
+    cdata.set('OUTPUT_DIR', meson.current_build_dir())
+    cdata.set('WARN_AS_ERROR', get_option('doc_werror') ? 'YES' : 'NO')
+
+    doxygen_predefined = []
+    foreach key : config_h.keys()
+        doxygen_predefined += '@0@=@1@'.format(key, config_h.get(key))
+    endforeach
+
+    cdata.set('PREDEFINED', ' \\\n\t\t\t '.join(doxygen_predefined))
+
+    doxyfile = configure_file(input : 'Doxyfile.in',
+                              output : 'Doxyfile',
+                              configuration : cdata)
+
+    doxygen_input = [
+        doxyfile,
+        libcamera_base_headers,
+        libcamera_base_sources,
+        libcamera_internal_headers,
+        libcamera_ipa_headers,
+        libcamera_ipa_interfaces,
+        libcamera_public_headers,
+        libcamera_sources,
+        libipa_headers,
+        libipa_sources,
+    ]
+
+    if is_variable('ipu3_ipa_sources')
+        doxygen_input += [ipu3_ipa_sources]
+    endif
+
+    custom_target('doxygen',
+                  input : doxygen_input,
+                  output : 'api-html',
+                  command : [doxygen, doxyfile],
+                  install : true,
+                  install_dir : doc_install_dir,
+                  install_tag : 'doc')
+endif
+
+#
+# Sphinx
+#
+
+sphinx = find_program('sphinx-build-3', required : false)
+if not sphinx.found()
+    sphinx = find_program('sphinx-build', required : get_option('documentation'))
+endif
+
+if sphinx.found()
+    docs_sources = [
+        'camera-sensor-model.rst',
+        'code-of-conduct.rst',
+        'coding-style.rst',
+        'conf.py',
+        'contributing.rst',
+        'docs.rst',
+        'environment_variables.rst',
+        'guides/application-developer.rst',
+        'guides/introduction.rst',
+        'guides/ipa.rst',
+        'guides/pipeline-handler.rst',
+        'guides/tracing.rst',
+        'index.rst',
+        'lens_driver_requirements.rst',
+        'python-bindings.rst',
+        'sensor_driver_requirements.rst',
+        'software-isp-benchmarking.rst',
+        '../README.rst',
+    ]
+
+    release = 'release=v' + libcamera_git_version
+
+    custom_target('documentation',
+                  command : [sphinx, '-D', release, '-q', '-W', '-b', 'html',
+                             meson.current_source_dir(), '@OUTPUT@'],
+                  input : docs_sources,
+                  output : 'html',
+                  build_by_default : true,
+                  install : true,
+                  install_dir : doc_install_dir,
+                  install_tag : 'doc')
+
+    custom_target('documentation-linkcheck',
+                  command : [sphinx, '-W', '-b', 'linkcheck', meson.current_source_dir(), '@OUTPUT@'],
+                  build_always_stale : true,
+                  input : docs_sources,
+                  output : 'linkcheck')
+endif
diff --git a/spider-cam/libcamera/Documentation/python-bindings.rst b/spider-cam/libcamera/Documentation/python-bindings.rst
new file mode 100644
index 0000000..ed9f686
--- /dev/null
+++ b/spider-cam/libcamera/Documentation/python-bindings.rst
@@ -0,0 +1,70 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+.. _python-bindings:
+
+Python Bindings for libcamera
+=============================
+
+.. warning::
+   The bindings are a work in progress, and the API will change.
+
+Differences to the C++ API
+--------------------------
+
+As a rule of thumb the bindings try to follow the C++ API when possible. This
+chapter lists the differences.
+
+Mostly these differences fall into two categories:
+
+1. Differences caused by the inherent differences between C++ and Python.
+   These differences are usually caused by the use of threads or differences
+   in C++ vs Python memory management.
+
+2. Differences caused by the code being work-in-progress. It's not always
+   trivial to create a binding in a satisfying way, and the current bindings
+   contain simplified versions of the C++ API just to make progress. These
+   differences are expected to eventually go away.
+
+Coding Style
+------------
+
+The C++ code for the bindings follows the libcamera coding style as much as
+possible. Note that the indentation does not quite follow the clang-format
+style, as clang-format makes a mess of the style used.
+
+The API visible to the Python side follows the Python style as much as
+possible.
+
+This means that e.g. ``Camera::generateConfiguration`` maps to
+``Camera.generate_configuration``.
+
+CameraManager
+-------------
+
+The Python API provides a singleton CameraManager via
+``CameraManager.singleton()``. There is no need to start or stop the
+CameraManager.
+
+Handling Completed Requests
+---------------------------
+
+The Python bindings do not expose the ``Camera::requestCompleted`` signal
+directly, as the signal is invoked from another thread and it has real-time
+constraints. Instead the bindings queue the completed requests internally and
+use an eventfd to inform the user that there are completed requests.
+
+The user can wait on the eventfd, and upon getting an event, use
+``CameraManager.get_ready_requests()`` to clear the eventfd event and to get
+the completed requests.
+
+Controls & Properties
+---------------------
+
+The classes related to controls and properties are rather complex to implement
+directly in the Python bindings. There are some simplifications in the Python
+bindings:
+
+- There is no ControlValue class. Python objects are automatically converted
+  to ControlValues and vice versa.
+- There is no ControlList class. A Python dict with ControlId keys and Python
+  object values is used instead.
+- There is no ControlInfoMap class. A Python dict with ControlId keys and
+  ControlInfo values is used instead.
diff --git a/spider-cam/libcamera/Documentation/sensor_driver_requirements.rst b/spider-cam/libcamera/Documentation/sensor_driver_requirements.rst
new file mode 100644
index 0000000..0e516b3
--- /dev/null
+++ b/spider-cam/libcamera/Documentation/sensor_driver_requirements.rst
@@ -0,0 +1,93 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+.. _sensor-driver-requirements:
+
+Sensor Driver Requirements
+==========================
+
+libcamera handles imaging devices in the CameraSensor class and defines
+a consistent interface through its API towards other library components.
+
+The CameraSensor class uses the V4L2 subdev kernel API to interface with the
+camera sensor through one or multiple sub-devices exposed in userspace by
+the sensor driver.
+
+In order for libcamera to be fully operational and provide all the required
+information for interfacing with the camera sensor to applications and
+pipeline handlers, a set of mandatory and optional features the driver has to
+support has been defined.
+
+Mandatory Requirements
+----------------------
+
+The sensor driver is assumed to be fully compliant with the V4L2 specification.
+
+For RAW sensors, the sensor driver shall support the following V4L2 controls:
+
+* `V4L2_CID_ANALOGUE_GAIN`_
+* `V4L2_CID_EXPOSURE`_
+* `V4L2_CID_HBLANK`_
+* `V4L2_CID_PIXEL_RATE`_
+* `V4L2_CID_VBLANK`_
+
+.. _V4L2_CID_ANALOGUE_GAIN: https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/ext-ctrls-image-source.html
+.. _V4L2_CID_EXPOSURE: https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/control.html
+.. _V4L2_CID_HBLANK: https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/ext-ctrls-image-source.html
+.. _V4L2_CID_PIXEL_RATE: https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/ext-ctrls-image-process.html
+.. _V4L2_CID_VBLANK: https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/ext-ctrls-image-source.html
+
+The ``ANALOGUE_GAIN`` control units are sensor-specific. libcamera requires
libcamera requires +a sensor-specific CameraSensorHelper implementation to translate between the +sensor specific ``gain code`` and the analogue ``gain value`` expressed as an +absolute number as defined by ``controls::AnalogueGain``. + +While V4L2 doesn't specify a unit for the ``EXPOSURE`` control, libcamera +requires it to be expressed as a number of image lines. Camera sensor drivers +that do not comply with this requirement will need to be adapted or will produce +incorrect results. + +The ``HBLANK``, ``PIXEL_RATE`` and ``VBLANK`` controls are used to compute the +sensor output timings. + +Optional Requirements +--------------------- + +The sensor driver should support the following V4L2 controls: + +* `V4L2_CID_CAMERA_ORIENTATION`_ +* `V4L2_CID_CAMERA_SENSOR_ROTATION`_ + +.. _V4L2_CID_CAMERA_ORIENTATION: https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/ext-ctrls-camera.html +.. _V4L2_CID_CAMERA_SENSOR_ROTATION: https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/ext-ctrls-camera.html + +The controls are used to register the camera location and rotation. + +In order to support rotating the image the sensor driver should support + +* `V4L2_CID_HFLIP`_ +* `V4L2_CID_VFLIP`_ + +.. _V4L2_CID_HFLIP: https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/control.html +.. _V4L2_CID_VFLIP: https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/control.html + +The controls must be writable from userspace. In case of a RAW Bayer sensors, +drivers should correctly report if vertical/horizontal flips modify the Bayer +pattern ordering by reporting the `V4L2_CTRL_FLAG_MODIFY_LAYOUT` control flag. + +The sensor driver should implement support for the V4L2 Selection API, +specifically it should implement support for the +`VIDIOC_SUBDEV_G_SELECTION`_ ioctl with support for the following selection +targets: + +.. _VIDIOC_SUBDEV_G_SELECTION: https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/vidioc-subdev-g-selection.html#c.V4L.VIDIOC_SUBDEV_G_SELECTION + +* `V4L2_SEL_TGT_CROP_BOUNDS`_ to report the readable pixel array area size +* `V4L2_SEL_TGT_CROP_DEFAULT`_ to report the active pixel array area size +* `V4L2_SEL_TGT_CROP`_ to report the analogue selection rectangle + +Support for the selection API is scheduled to become a mandatory feature in +the near future. + +.. _V4L2_SEL_TGT_CROP_BOUNDS: https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/v4l2-selection-targets.html +.. _V4L2_SEL_TGT_CROP_DEFAULT: https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/v4l2-selection-targets.html +.. 
diff --git a/spider-cam/libcamera/Documentation/sensor_model.svg b/spider-cam/libcamera/Documentation/sensor_model.svg
new file mode 100644
index 0000000..02dc55a
--- /dev/null
+++ b/spider-cam/libcamera/Documentation/sensor_model.svg
@@ -0,0 +1,4870 @@
+[SVG markup not reproduced: camera sensor model diagram showing the stages
+"Pixel array", "Analog crop", "Subsampling", "Digital crop" and "Media Bus",
+numbered 1 to 4]
diff --git a/spider-cam/libcamera/Documentation/skipping.svg b/spider-cam/libcamera/Documentation/skipping.svg
new file mode 100644
index 0000000..7bef37c
--- /dev/null
+++ b/spider-cam/libcamera/Documentation/skipping.svg
@@ -0,0 +1,1720 @@
+[SVG markup not reproduced: pixel skipping diagram comparing x_even_inc = 1,
+y_even_inc = 1 against x_odd_inc = 3, y_odd_inc = 3]
diff --git a/spider-cam/libcamera/Documentation/software-isp-benchmarking.rst b/spider-cam/libcamera/Documentation/software-isp-benchmarking.rst
new file mode 100644
index 0000000..b303313
--- /dev/null
+++ b/spider-cam/libcamera/Documentation/software-isp-benchmarking.rst
@@ -0,0 +1,77 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+.. _software-isp-benchmarking:
+
+Software ISP benchmarking
+=========================
+
+The Software ISP is particularly sensitive to performance regressions,
+therefore it is a good idea to always benchmark the Software ISP before and
+after making changes to it, and to ensure that there are no performance
+regressions.
+
+DebayerCpu class builtin benchmark
+----------------------------------
+
+The DebayerCpu class has a builtin benchmark. This benchmark measures the time
+spent on processing (collecting statistics and debayering) only; it does not
+measure the time spent on capturing or outputting the frames.
+
+The builtin benchmark always runs, so it can be used by simply running ``cam``
+or ``qcam`` with a pipeline using the Software ISP.
+
+When it runs it will skip measuring the first 30 frames to allow the caches
+and the CPU temperature (turbo-ing) to warm up, and then it measures the next
+30 frames and shows the total and per-frame processing time using an info
+level log message:
+
+.. code-block:: text
+
+   INFO Debayer debayer_cpu.cpp:907 Processed 30 frames in 244317us, 8143 us/frame
+
+To get stable measurements it is advised to disable any other processes which
+may cause significant CPU usage (e.g. disable wifi, bluetooth and browsers).
+When possible it is also advisable to disable CPU turbo-ing and
+frequency-scaling.
+
+For example when benchmarking on a Lenovo ThinkPad X1 Yoga Gen 8, with the
+charger plugged in, the CPU can be fixed to run at 2 GHz using:
+
+.. code-block:: shell
+
+   sudo x86_energy_perf_policy --turbo-enable 0
+   sudo cpupower frequency-set -d 2GHz -u 2GHz
+
+With these settings the builtin benchmark reports a processing time of
+~7.8ms/frame on this laptop for FHD SGRBG10 (unpacked) bayer data.
+
+Measuring power consumption
+---------------------------
+
+Since the Software ISP is often used on mobile devices it is also important to
+measure power consumption and ensure that it does not regress.
+
+For example, to measure power consumption on a Lenovo ThinkPad X1 Yoga Gen 8,
+it needs to be running on battery, and it should be configured with its
+platform-profile (/sys/firmware/acpi/platform_profile) set to balanced and
+with its default turbo and frequency-scaling behavior to match real world
+usage.
+
+Then start qcam to capture a FHD picture at 30 fps and position the qcam
+window so that it is fully visible. After this, run the following command to
+monitor the power consumption:
+
+.. code-block:: shell
+
+   watch -n 10 cat /sys/class/power_supply/BAT0/power_now /sys/class/hwmon/hwmon6/fan?_input
+
+Note that this not only measures the power consumption in µW, it also monitors
+the speed of this laptop's two fans. This is important because, depending on
+the ambient temperature, the fans may spin up while testing, causing an
+additional power consumption of approx. 0.5 W and skewing the measurement.
+
+After starting qcam and the watch command, let the laptop sit unused for
+2 minutes so the readings stabilize. Then check that the fans have not turned
+on, manually take a couple of consecutive power readings, and average them.
+
+On the example Lenovo ThinkPad X1 Yoga Gen 8 laptop this results in a measured
+power consumption of approx. 13 W while running qcam versus approx. 4-5 W
+while sitting idle with its OLED panel on.
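+
+Taking and averaging those readings can also be scripted. A minimal sketch,
+assuming the ``BAT0`` sysfs path shown above and mirroring the 10 second
+sampling interval of the watch command:
+
+.. code-block:: cpp
+
+   #include <chrono>
+   #include <fstream>
+   #include <iostream>
+   #include <thread>
+
+   int main()
+   {
+           const char *path = "/sys/class/power_supply/BAT0/power_now";
+           constexpr int samples = 5;
+           long long total = 0;
+
+           for (int i = 0; i < samples; i++) {
+                   std::ifstream f(path);
+                   long long uw = 0;
+                   f >> uw; /* instantaneous power draw in µW */
+                   total += uw;
+                   std::this_thread::sleep_for(std::chrono::seconds(10));
+           }
+
+           std::cout << "Average power: "
+                     << (total / samples) / 1e6 << " W\n";
+   }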
diff --git a/spider-cam/libcamera/Documentation/theme/footer.html b/spider-cam/libcamera/Documentation/theme/footer.html new file mode 100644 index 0000000..12939e8 --- /dev/null +++ b/spider-cam/libcamera/Documentation/theme/footer.html @@ -0,0 +1,14 @@ +{# +SPDX-License-Identifier: CC-BY-SA-4.0 +#} +
+
+ {%- if show_copyright %} + {%- if hasdoc('copyright') %} + {% trans path=pathto('copyright'), copyright=copyright|e %}© Copyright {{ copyright }}.{% endtrans %} + {%- else %} + {% trans copyright=copyright|e %}© Copyright {{ copyright }}.{% endtrans %} + {%- endif %} + {%- endif %} +
+
diff --git a/spider-cam/libcamera/Documentation/theme/layout.html b/spider-cam/libcamera/Documentation/theme/layout.html new file mode 100644 index 0000000..4fffefa --- /dev/null +++ b/spider-cam/libcamera/Documentation/theme/layout.html @@ -0,0 +1,109 @@ +{# +SPDX-License-Identifier: CC-BY-SA-4.0 +#} +{# TEMPLATE VAR SETTINGS #} +{%- set url_root = pathto('', 1) %} +{%- if url_root == '#' %}{% set url_root = '' %}{% endif %} +{%- if not embedded and docstitle %} + {%- set titlesuffix = " — "|safe + docstitle|e %} +{%- else %} + {%- set titlesuffix = "" %} +{%- endif %} + + + + {{ metatags }} + + {% block htmltitle %} + {{ title|striptags|e }}{{ titlesuffix }} + {% endblock %} + + {# FAVICON #} + {% if favicon %} + + {% endif %} + + {# CSS #} + + {# OPENSEARCH #} + {% if not embedded %} + {% if use_opensearch %} + + {% endif %} + + {% endif %} + + {% for cssfile in css_files %} + + {% endfor %} + + {% for cssfile in extra_css_files %} + + {% endfor %} + + {%- block linktags %} + {%- if hasdoc('about') %} + + {%- endif %} + {%- if hasdoc('genindex') %} + + {%- endif %} + {%- if hasdoc('search') %} + + {%- endif %} + {%- if hasdoc('copyright') %} + + {%- endif %} + + {%- if parents %} + + {%- endif %} + {%- if next %} + + {%- endif %} + {%- if prev %} + + {%- endif %} + {%- endblock %} + {%- block extrahead %} {% endblock %} + + + + +
+ +
+ +
+ {# PAGE CONTENT #} +
+ {% block body %}{% endblock %} +
+
+ + {% include "footer.html" %} + + diff --git a/spider-cam/libcamera/Documentation/theme/search.html b/spider-cam/libcamera/Documentation/theme/search.html new file mode 100644 index 0000000..00c2af9 --- /dev/null +++ b/spider-cam/libcamera/Documentation/theme/search.html @@ -0,0 +1,63 @@ +{# +SPDX-License-Identifier: CC-BY-SA-4.0 +#} +{# + basic/search.html + ~~~~~~~~~~~~~~~~~ + + Template for the search page. + + :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS. + :license: BSD, see LICENSE for details. +#} +{%- extends "layout.html" %} +{% block extrahead %} + + {%- for scriptfile in script_files %} + + {%- endfor %} + + + {# this is used when loading the search index using $.ajax fails, + such as on Chrome for documents on localhost #} + +{% endblock %} +{% block body %} +

{{ _('Search') }}

+
+ +

+ Please activate JavaScript to enable the search functionality. +

+
+

+ From here you can search these documents. Enter your search + words into the box below and click "search". Note that the search + function will automatically search for all of the words. Pages + containing fewer words won't appear in the result list. +

+
+ + + +
+ {% if search_performed %} +

{{ _('Search Results') }}

+ {% if not search_results %} +

{{ _('Your search did not match any documents. Please make sure that all words are spelled correctly and that you\'ve selected enough categories.') }}

+ {% endif %} + {% endif %} +
+ {% if search_results %} +
    + {% for href, caption, context in search_results %} +
  • {{ caption }} +
    {{ context|e }}
    +
  • + {% endfor %} +
+ {% endif %} +
+{% endblock %} diff --git a/spider-cam/libcamera/Documentation/theme/static/css/theme.css b/spider-cam/libcamera/Documentation/theme/static/css/theme.css new file mode 100644 index 0000000..d4274ea --- /dev/null +++ b/spider-cam/libcamera/Documentation/theme/static/css/theme.css @@ -0,0 +1,291 @@ +/* SPDX-License-Identifier: CC-BY-SA-4.0 */ + +html { + background-image: linear-gradient(to bottom right, #4895e1, #56c3ae); + background-size: cover; + background-repeat: no-repeat; + min-height: 100vh; +} + +body { + color: rgb(0, 0, 0, 0.65); + font-family: Arial, sans-serif; + margin: 0px; +} + +a { + color: unset; + font-weight: bold; + text-decoration: underline dotted; +} + +a.headerlink { + color: rgba(0, 0, 0, 0.2); + font-size: 70%; + padding-left: 5px; + visibility: hidden; +} + +a.toc-backref { + text-decoration: none; +} + +h1:hover a.headerlink, +h2:hover a.headerlink, +h3:hover a.headerlink, +h4:hover a.headerlink, +h5:hover a.headerlink, +h6:hover a.headerlink { + visibility: visible; +} + +dt { + font-weight: bold; +} + +.text-light { + color: rgba(255, 255, 255, 0.3); +} + +div#navbar { + margin-top: 0px; +} + +div.navbar-brand { + color: rgb(255, 255, 255, 1.0); + float: left; + font-size: 36px; + margin: 0px 24px 24px 24px; +} + +div.navbar-logo { + float: left; + font-family: monospace; + font-size: 18px; + font-weight: bold; + white-space: pre; +} + +div.navbar-name { + float: left; + color: rgb(255, 255, 255, 1.0); + font-size: 34px; + margin-top: 31px; + margin-left: 10px; + padding-top: 1px; +} + +div.navbar { + float: right; +} + +div.navbar p.caption { + height: 0px; + margin: 0px; + visibility: hidden; +} + +div.navbar ul { + float: left; + font-size: 24px; + list-style: none; + margin-top: 42px; + margin-right: 20px; + padding-left: 0px; +} + +div.navbar a { + font-weight: normal; + text-decoration: none; +} + +div.navbar li { + float: left; + margin-left: 20px; + margin-right: 20px; + position: relative; +} + +div.navbar li a { + color: rgb(255, 255, 255, 0.5); + position: relative; +} + +div.navbar li a:before { + content: ""; + position: absolute; + width: 100%; + height: 2px; + bottom: 0; + left: 0; + background-color: rgb(255, 255, 255, 0.5); + visibility: hidden; + transform: scaleX(0); + transition: all 0.3s ease-in-out 0s; +} + +div.navbar li a:hover { + color: rgb(255, 255, 255, 1.0); +} + +div.navbar li a:hover:before { + visibility: visible; + transform: scaleX(1); +} + +div.navbar li.current a { + color: rgb(255, 255, 255, 1.0); +} + +div.navbar li.current a:before { + visibility: visible; + transform: unset; + transition: unset; +} + +div.navbar div.searchbox { + background-color: white; + float: right; + margin-right: 50px; + margin-top: 42px; +} + +div.navbar input[type=text] { + border-width: 0; + height: 2em; + margin-left: 10px; + margin-right: 5px; +} + +div.navbar input[type=submit] { + background-color: white; + background-image: url(../search.png); + background-repeat: no-repeat; + border-width: 0; + color: rgba(0, 0, 0, 0); + margin-right: 2px; + width: 20px; +} + +div#frontpage { + clear: both; + padding-top: 50px; + margin-left: auto; + margin-right: auto; + width: 75%; + display: flex; + justify-content: space-between; +} + +div#frontpage > div.block { + background-color: white; + border-radius: 5px; + box-shadow: 0 4px 16px 0 rgba(0, 0, 0, 0.2), 0 6px 40px 0 rgba(0, 0, 0, 0.19); + color: rgb(0, 0, 0, 0.5); + font-size: 20px; + margin-bottom: 40px; + margin-right: 20px; + margin-left: 20px; + padding: 20px 60px 20px 60px; + text-align: 
center; + width: 50%; +} + +div#frontpage > div.block h1 { + font-size: 64px; + padding-left: 20%; + padding-right: 20%; + text-align: center; + text-shadow: 4px 4px 5px; +} + +div#content { + background-color: white; + clear: both; + padding-top: 50px; + padding-bottom: 50px; + margin-left: 0px; + margin-right: 0px; +} + +div#content > div.block { + font-size: 16px; + margin-right: 0px; + margin-left: 0px; + max-width: 1280px; + padding: 0px 60px 0px 60px; + text-align: justify; +} + +div#content > div.block h1 { + font-size: 40px; + margin-top: 0px; + text-align: left; +} + +div#content > div.block > div.section { + max-width: 800px; +} + +div.local.topic { + float: right; + background-color: #fcfcff; + border: 1px dotted #4896e0; + margin-left: 20px; + margin-right: 0px; + max-width: 15em; + padding: 10px 20px 10px 10px; + text-align: left; +} + +div.local.topic ul { + padding-left: 20px; + margin-bottom: 5px; +} + +div.local.topic > ul:before { + content: "Contents"; + display: block; + font-weight: bold; + margin-bottom: 10px; +} + +div.local.topic a { + font-weight: normal; + padding-left: 10px; + text-decoration: none; +} + +div.highlight-shell > div.highlight > pre, +pre.console { + background-color: #fcfcff; + border: 1px dotted #4896e0; + margin-left: 0em; + padding: 10px; + text-align: left; +} + +div.highlight-default > div.highlight > pre, +pre.diagram { + background-color: #fcfcff; + border: 1px dotted #4896e0; + font-size: 12px; + margin-left: 0em; + padding: 10px; + text-align: left; + width: 47em; +} + +div#signature { + color: rgb(255, 255, 255, 0.5); + margin: 20px; + float: right; + font-size: 12px; +} + +#libcamera div.toctree-wrapper { + height: 0px; + margin: 0px; + padding: 0px; + visibility: hidden; +} diff --git a/spider-cam/libcamera/Documentation/theme/static/search.png b/spider-cam/libcamera/Documentation/theme/static/search.png new file mode 100644 index 0000000..a93c40e Binary files /dev/null and b/spider-cam/libcamera/Documentation/theme/static/search.png differ diff --git a/spider-cam/libcamera/Documentation/theme/theme.conf b/spider-cam/libcamera/Documentation/theme/theme.conf new file mode 100644 index 0000000..f2ab39c --- /dev/null +++ b/spider-cam/libcamera/Documentation/theme/theme.conf @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: CC-BY-SA-4.0 + +[theme] +inherit = basic +stylesheet = css/theme.css + +[options] diff --git a/spider-cam/libcamera/LICENSES/Apache-2.0.txt b/spider-cam/libcamera/LICENSES/Apache-2.0.txt new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/spider-cam/libcamera/LICENSES/Apache-2.0.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/spider-cam/libcamera/LICENSES/BSD-2-Clause.txt b/spider-cam/libcamera/LICENSES/BSD-2-Clause.txt new file mode 100644 index 0000000..2d2bab1 --- /dev/null +++ b/spider-cam/libcamera/LICENSES/BSD-2-Clause.txt @@ -0,0 +1,22 @@ +Copyright (c) . All rights reserved. 
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/spider-cam/libcamera/LICENSES/BSD-3-Clause.txt b/spider-cam/libcamera/LICENSES/BSD-3-Clause.txt
new file mode 100644
index 0000000..0741db7
--- /dev/null
+++ b/spider-cam/libcamera/LICENSES/BSD-3-Clause.txt
@@ -0,0 +1,26 @@
+Copyright (c) . All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors
+may be used to endorse or promote products derived from this software without
+specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/spider-cam/libcamera/LICENSES/CC-BY-4.0.txt b/spider-cam/libcamera/LICENSES/CC-BY-4.0.txt
new file mode 100644
index 0000000..13ca539
--- /dev/null
+++ b/spider-cam/libcamera/LICENSES/CC-BY-4.0.txt
@@ -0,0 +1,156 @@
+Creative Commons Attribution 4.0 International
+
+ Creative Commons Corporation (“Creative Commons”) is not a law firm and does not provide legal services or legal advice. Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship. Creative Commons makes its licenses and related information available on an “as-is” basis. Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible.
+
+Using Creative Commons Public Licenses
+
+Creative Commons public licenses provide a standard set of terms and conditions that creators and other rights holders may use to share original works of authorship and other material subject to copyright and certain other rights specified in the public license below. The following considerations are for informational purposes only, are not exhaustive, and do not form part of our licenses.
+
+Considerations for licensors: Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. Licensors should clearly mark any material not subject to the license. This includes other CC-licensed material, or material used under an exception or limitation to copyright. More considerations for licensors.
+
+Considerations for the public: By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. If the licensor’s permission is not necessary for any reason–for example, because of any applicable exception or limitation to copyright–then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. More considerations for the public.
+
+Creative Commons Attribution 4.0 International Public License
+
+By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions.
+
+Section 1 – Definitions.
+
+ a. Adapted Material means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image.
+
+ b. Adapter's License means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License.
+
+ c. Copyright and Similar Rights means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights.
+
+ d. Effective Technological Measures means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements.
+
+ e. Exceptions and Limitations means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material.
+
+ f. Licensed Material means the artistic or literary work, database, or other material to which the Licensor applied this Public License.
+
+ g. Licensed Rights means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license.
+
+ h. Licensor means the individual(s) or entity(ies) granting rights under this Public License.
+
+ i. Share means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them.
+
+ j. Sui Generis Database Rights means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world.
+
+ k. You means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning.
+
+Section 2 – Scope.
+
+ a. License grant.
+
+ 1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to:
+
+ A. reproduce and Share the Licensed Material, in whole or in part; and
+
+ B. produce, reproduce, and Share Adapted Material.
+
+ 2. Exceptions and Limitations. For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions.
+
+ 3. Term. The term of this Public License is specified in Section 6(a).
+
+ 4. Media and formats; technical modifications allowed. The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a)(4) never produces Adapted Material.
+
+ 5. Downstream recipients.
+
+ A. Offer from the Licensor – Licensed Material. Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License.
+
+ B. No downstream restrictions. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material.
+
+ 6. No endorsement. Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i).
+
+b. Other rights.
+
+ 1. Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise.
+
+ 2. Patent and trademark rights are not licensed under this Public License.
+
+ 3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties.
+
+Section 3 – License Conditions.
+
+Your exercise of the Licensed Rights is expressly made subject to the following conditions.
+
+ a. Attribution.
+
+ 1. If You Share the Licensed Material (including in modified form), You must:
+
+ A. retain the following if it is supplied by the Licensor with the Licensed Material:
+
+ i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated);
+
+ ii. a copyright notice;
+
+ iii. a notice that refers to this Public License;
+
+ iv. a notice that refers to the disclaimer of warranties;
+
+ v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable;
+
+ B. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and
+
+ C. indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License.
+
+ 2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information.
+
+ 3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable.
+
+ 4. If You Share Adapted Material You produce, the Adapter's License You apply must not prevent recipients of the Adapted Material from complying with this Public License.
+
+Section 4 – Sui Generis Database Rights.
+
+Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material:
+
+ a. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database;
+
+ b. if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material; and
+
+ c. You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database.
+For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights.
+
+Section 5 – Disclaimer of Warranties and Limitation of Liability.
+
+ a. Unless otherwise separately undertaken by the Licensor, to the extent possible, the Licensor offers the Licensed Material as-is and as-available, and makes no representations or warranties of any kind concerning the Licensed Material, whether express, implied, statutory, or other. This includes, without limitation, warranties of title, merchantability, fitness for a particular purpose, non-infringement, absence of latent or other defects, accuracy, or the presence or absence of errors, whether or not known or discoverable. Where disclaimers of warranties are not allowed in full or in part, this disclaimer may not apply to You.
+
+ b. To the extent possible, in no event will the Licensor be liable to You on any legal theory (including, without limitation, negligence) or otherwise for any direct, special, indirect, incidental, consequential, punitive, exemplary, or other losses, costs, expenses, or damages arising out of this Public License or use of the Licensed Material, even if the Licensor has been advised of the possibility of such losses, costs, expenses, or damages. Where a limitation of liability is not allowed in full or in part, this limitation may not apply to You.
+
+ c. The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability.
+
+Section 6 – Term and Termination.
+
+ a. This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically.
+
+ b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates:
+
+ 1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or
+
+ 2. upon express reinstatement by the Licensor.
+
+ c. For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License.
+
+ d. For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License.
+
+ e. Sections 1, 5, 6, 7, and 8 survive termination of this Public License.
+
+Section 7 – Other Terms and Conditions.
+
+ a. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed.
+
+ b. Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License.
+
+Section 8 – Interpretation.
+
+ a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License.
+
+ b. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions.
+
+ c. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor.
+
+ d. Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority.
+
+Creative Commons is not a party to its public licenses. Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the “Licensor.” Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at creativecommons.org/policies, Creative Commons does not authorize the use of the trademark “Creative Commons” or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses.
+
+Creative Commons may be contacted at creativecommons.org.
diff --git a/spider-cam/libcamera/LICENSES/CC-BY-SA-4.0.txt b/spider-cam/libcamera/LICENSES/CC-BY-SA-4.0.txt
new file mode 100644
index 0000000..31279dd
--- /dev/null
+++ b/spider-cam/libcamera/LICENSES/CC-BY-SA-4.0.txt
@@ -0,0 +1,428 @@
+Attribution-ShareAlike 4.0 International
+
+=======================================================================
+
+Creative Commons Corporation ("Creative Commons") is not a law firm and
+does not provide legal services or legal advice. Distribution of
+Creative Commons public licenses does not create a lawyer-client or
+other relationship. Creative Commons makes its licenses and related
+information available on an "as-is" basis. Creative Commons gives no
+warranties regarding its licenses, any material licensed under their
+terms and conditions, or any related information.
Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. 
For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. 
For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. 
a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + + including for purposes of Section 3(b); and + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. 
TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public +licenses. 
Notwithstanding, Creative Commons may elect to apply one of
+its public licenses to material it publishes and in those instances
+will be considered the “Licensor.” The text of the Creative Commons
+public licenses is dedicated to the public domain under the CC0 Public
+Domain Dedication. Except for the limited purpose of indicating that
+material is shared under a Creative Commons public license or as
+otherwise permitted by the Creative Commons policies published at
+creativecommons.org/policies, Creative Commons does not authorize the
+use of the trademark "Creative Commons" or any other trademark or logo
+of Creative Commons without its prior written consent including,
+without limitation, in connection with any unauthorized modifications
+to any of its public licenses or any other arrangements,
+understandings, or agreements concerning use of licensed material. For
+the avoidance of doubt, this paragraph does not form part of the
+public licenses.
+
+Creative Commons may be contacted at creativecommons.org.
+
diff --git a/spider-cam/libcamera/LICENSES/CC0-1.0.txt b/spider-cam/libcamera/LICENSES/CC0-1.0.txt
new file mode 100644
index 0000000..a343ccd
--- /dev/null
+++ b/spider-cam/libcamera/LICENSES/CC0-1.0.txt
@@ -0,0 +1,119 @@
+Creative Commons Legal Code
+
+CC0 1.0 Universal CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES
+NOT PROVIDE LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE
+AN ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS INFORMATION
+ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES REGARDING THE USE
+OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED HEREUNDER, AND DISCLAIMS
+LIABILITY FOR DAMAGES RESULTING FROM THE USE OF THIS DOCUMENT OR THE INFORMATION
+OR WORKS PROVIDED HEREUNDER.
+
+Statement of Purpose
+
+The laws of most jurisdictions throughout the world automatically confer exclusive
+Copyright and Related Rights (defined below) upon the creator and subsequent
+owner(s) (each and all, an "owner") of an original work of authorship and/or
+a database (each, a "Work").
+
+Certain owners wish to permanently relinquish those rights to a Work for the
+purpose of contributing to a commons of creative, cultural and scientific
+works ("Commons") that the public can reliably and without fear of later claims
+of infringement build upon, modify, incorporate in other works, reuse and
+redistribute as freely as possible in any form whatsoever and for any purposes,
+including without limitation commercial purposes. These owners may contribute
+to the Commons to promote the ideal of a free culture and the further production
+of creative, cultural and scientific works, or to gain reputation or greater
+distribution for their Work in part through the use and efforts of others.
+
+For these and/or other purposes and motivations, and without any expectation
+of additional consideration or compensation, the person associating CC0 with
+a Work (the "Affirmer"), to the extent that he or she is an owner of Copyright
+and Related Rights in the Work, voluntarily elects to apply CC0 to the Work
+and publicly distribute the Work under its terms, with knowledge of his or
+her Copyright and Related Rights in the Work and the meaning and intended
+legal effect of CC0 on those rights.
+
+1. Copyright and Related Rights. A Work made available under CC0 may be protected
+by copyright and related or neighboring rights ("Copyright and Related Rights").
+Copyright and Related Rights include, but are not limited to, the following:
+
+i. 
the right to reproduce, adapt, distribute, perform, display, communicate, +and translate a Work; + + ii. moral rights retained by the original author(s) and/or performer(s); + +iii. publicity and privacy rights pertaining to a person's image or likeness +depicted in a Work; + +iv. rights protecting against unfair competition in regards to a Work, subject +to the limitations in paragraph 4(a), below; + +v. rights protecting the extraction, dissemination, use and reuse of data +in a Work; + +vi. database rights (such as those arising under Directive 96/9/EC of the +European Parliament and of the Council of 11 March 1996 on the legal protection +of databases, and under any national implementation thereof, including any +amended or successor version of such directive); and + +vii. other similar, equivalent or corresponding rights throughout the world +based on applicable law or treaty, and any national implementations thereof. + +2. Waiver. To the greatest extent permitted by, but not in contravention of, +applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and +unconditionally waives, abandons, and surrenders all of Affirmer's Copyright +and Related Rights and associated claims and causes of action, whether now +known or unknown (including existing as well as future claims and causes of +action), in the Work (i) in all territories worldwide, (ii) for the maximum +duration provided by applicable law or treaty (including future time extensions), +(iii) in any current or future medium and for any number of copies, and (iv) +for any purpose whatsoever, including without limitation commercial, advertising +or promotional purposes (the "Waiver"). Affirmer makes the Waiver for the +benefit of each member of the public at large and to the detriment of Affirmer's +heirs and successors, fully intending that such Waiver shall not be subject +to revocation, rescission, cancellation, termination, or any other legal or +equitable action to disrupt the quiet enjoyment of the Work by the public +as contemplated by Affirmer's express Statement of Purpose. + +3. Public License Fallback. Should any part of the Waiver for any reason be +judged legally invalid or ineffective under applicable law, then the Waiver +shall be preserved to the maximum extent permitted taking into account Affirmer's +express Statement of Purpose. In addition, to the extent the Waiver is so +judged Affirmer hereby grants to each affected person a royalty-free, non +transferable, non sublicensable, non exclusive, irrevocable and unconditional +license to exercise Affirmer's Copyright and Related Rights in the Work (i) +in all territories worldwide, (ii) for the maximum duration provided by applicable +law or treaty (including future time extensions), (iii) in any current or +future medium and for any number of copies, and (iv) for any purpose whatsoever, +including without limitation commercial, advertising or promotional purposes +(the "License"). The License shall be deemed effective as of the date CC0 +was applied by Affirmer to the Work. 
Should any part of the License for any +reason be judged legally invalid or ineffective under applicable law, such +partial invalidity or ineffectiveness shall not invalidate the remainder of +the License, and in such case Affirmer hereby affirms that he or she will +not (i) exercise any of his or her remaining Copyright and Related Rights +in the Work or (ii) assert any associated claims and causes of action with +respect to the Work, in either case contrary to Affirmer's express Statement +of Purpose. + + 4. Limitations and Disclaimers. + +a. No trademark or patent rights held by Affirmer are waived, abandoned, surrendered, +licensed or otherwise affected by this document. + +b. Affirmer offers the Work as-is and makes no representations or warranties +of any kind concerning the Work, express, implied, statutory or otherwise, +including without limitation warranties of title, merchantability, fitness +for a particular purpose, non infringement, or the absence of latent or other +defects, accuracy, or the present or absence of errors, whether or not discoverable, +all to the greatest extent permissible under applicable law. + +c. Affirmer disclaims responsibility for clearing rights of other persons +that may apply to the Work or any use thereof, including without limitation +any person's Copyright and Related Rights in the Work. Further, Affirmer disclaims +responsibility for obtaining any necessary consents, permissions or other +rights required for any use of the Work. + +d. Affirmer understands and acknowledges that Creative Commons is not a party +to this document and has no duty or obligation with respect to this CC0 or +use of the Work. diff --git a/spider-cam/libcamera/LICENSES/GPL-2.0+.txt b/spider-cam/libcamera/LICENSES/GPL-2.0+.txt new file mode 120000 index 0000000..ec035b6 --- /dev/null +++ b/spider-cam/libcamera/LICENSES/GPL-2.0+.txt @@ -0,0 +1 @@ +GPL-2.0-or-later.txt \ No newline at end of file diff --git a/spider-cam/libcamera/LICENSES/GPL-2.0-only.txt b/spider-cam/libcamera/LICENSES/GPL-2.0-only.txt new file mode 100644 index 0000000..d159169 --- /dev/null +++ b/spider-cam/libcamera/LICENSES/GPL-2.0-only.txt @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. 
+ + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. 
You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) 
+ +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. 
+ +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/spider-cam/libcamera/LICENSES/GPL-2.0-or-later.txt b/spider-cam/libcamera/LICENSES/GPL-2.0-or-later.txt new file mode 100644 index 0000000..1d80ac3 --- /dev/null +++ b/spider-cam/libcamera/LICENSES/GPL-2.0-or-later.txt @@ -0,0 +1,319 @@ +GNU GENERAL PUBLIC LICENSE + +Version 2, June 1991 + +Copyright (C) 1989, 1991 Free Software Foundation, Inc. + +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + +Everyone is permitted to copy and distribute verbatim copies of this license +document, but changing it is not allowed. + +Preamble + +The licenses for most software are designed to take away your freedom to share +and change it. By contrast, the GNU General Public License is intended to +guarantee your freedom to share and change free software--to make sure the +software is free for all its users. This General Public License applies to +most of the Free Software Foundation's software and to any other program whose +authors commit to using it. (Some other Free Software Foundation software +is covered by the GNU Lesser General Public License instead.) You can apply +it to your programs, too. + +When we speak of free software, we are referring to freedom, not price. Our +General Public Licenses are designed to make sure that you have the freedom +to distribute copies of free software (and charge for this service if you +wish), that you receive source code or can get it if you want it, that you +can change the software or use pieces of it in new free programs; and that +you know you can do these things. + +To protect your rights, we need to make restrictions that forbid anyone to +deny you these rights or to ask you to surrender the rights. These restrictions +translate to certain responsibilities for you if you distribute copies of +the software, or if you modify it. + +For example, if you distribute copies of such a program, whether gratis or +for a fee, you must give the recipients all the rights that you have. You +must make sure that they, too, receive or can get the source code. And you +must show them these terms so they know their rights. + +We protect your rights with two steps: (1) copyright the software, and (2) +offer you this license which gives you legal permission to copy, distribute +and/or modify the software. + +Also, for each author's protection and ours, we want to make certain that +everyone understands that there is no warranty for this free software. If +the software is modified by someone else and passed on, we want its recipients +to know that what they have is not the original, so that any problems introduced +by others will not reflect on the original authors' reputations. + +Finally, any free program is threatened constantly by software patents. We +wish to avoid the danger that redistributors of a free program will individually +obtain patent licenses, in effect making the program proprietary. To prevent +this, we have made it clear that any patent must be licensed for everyone's +free use or not licensed at all. + +The precise terms and conditions for copying, distribution and modification +follow. + +TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + +0. This License applies to any program or other work which contains a notice +placed by the copyright holder saying it may be distributed under the terms +of this General Public License. 
The "Program", below, refers to any such program +or work, and a "work based on the Program" means either the Program or any +derivative work under copyright law: that is to say, a work containing the +Program or a portion of it, either verbatim or with modifications and/or translated +into another language. (Hereinafter, translation is included without limitation +in the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not covered +by this License; they are outside its scope. The act of running the Program +is not restricted, and the output from the Program is covered only if its +contents constitute a work based on the Program (independent of having been +made by running the Program). Whether that is true depends on what the Program +does. + +1. You may copy and distribute verbatim copies of the Program's source code +as you receive it, in any medium, provided that you conspicuously and appropriately +publish on each copy an appropriate copyright notice and disclaimer of warranty; +keep intact all the notices that refer to this License and to the absence +of any warranty; and give any other recipients of the Program a copy of this +License along with the Program. + +You may charge a fee for the physical act of transferring a copy, and you +may at your option offer warranty protection in exchange for a fee. + +2. You may modify your copy or copies of the Program or any portion of it, +thus forming a work based on the Program, and copy and distribute such modifications +or work under the terms of Section 1 above, provided that you also meet all +of these conditions: + +a) You must cause the modified files to carry prominent notices stating that +you changed the files and the date of any change. + +b) You must cause any work that you distribute or publish, that in whole or +in part contains or is derived from the Program or any part thereof, to be +licensed as a whole at no charge to all third parties under the terms of this +License. + +c) If the modified program normally reads commands interactively when run, +you must cause it, when started running for such interactive use in the most +ordinary way, to print or display an announcement including an appropriate +copyright notice and a notice that there is no warranty (or else, saying that +you provide a warranty) and that users may redistribute the program under +these conditions, and telling the user how to view a copy of this License. +(Exception: if the Program itself is interactive but does not normally print +such an announcement, your work based on the Program is not required to print +an announcement.) + +These requirements apply to the modified work as a whole. If identifiable +sections of that work are not derived from the Program, and can be reasonably +considered independent and separate works in themselves, then this License, +and its terms, do not apply to those sections when you distribute them as +separate works. But when you distribute the same sections as part of a whole +which is a work based on the Program, the distribution of the whole must be +on the terms of this License, whose permissions for other licensees extend +to the entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest your +rights to work written entirely by you; rather, the intent is to exercise +the right to control the distribution of derivative or collective works based +on the Program. 
+ +In addition, mere aggregation of another work not based on the Program with +the Program (or with a work based on the Program) on a volume of a storage +or distribution medium does not bring the other work under the scope of this +License. + +3. You may copy and distribute the Program (or a work based on it, under Section +2) in object code or executable form under the terms of Sections 1 and 2 above +provided that you also do one of the following: + +a) Accompany it with the complete corresponding machine-readable source code, +which must be distributed under the terms of Sections 1 and 2 above on a medium +customarily used for software interchange; or, + +b) Accompany it with a written offer, valid for at least three years, to give +any third party, for a charge no more than your cost of physically performing +source distribution, a complete machine-readable copy of the corresponding +source code, to be distributed under the terms of Sections 1 and 2 above on +a medium customarily used for software interchange; or, + +c) Accompany it with the information you received as to the offer to distribute +corresponding source code. (This alternative is allowed only for noncommercial +distribution and only if you received the program in object code or executable +form with such an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for making +modifications to it. For an executable work, complete source code means all +the source code for all modules it contains, plus any associated interface +definition files, plus the scripts used to control compilation and installation +of the executable. However, as a special exception, the source code distributed +need not include anything that is normally distributed (in either source or +binary form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component itself +accompanies the executable. + +If distribution of executable or object code is made by offering access to +copy from a designated place, then offering equivalent access to copy the +source code from the same place counts as distribution of the source code, +even though third parties are not compelled to copy the source along with +the object code. + +4. You may not copy, modify, sublicense, or distribute the Program except +as expressly provided under this License. Any attempt otherwise to copy, modify, +sublicense or distribute the Program is void, and will automatically terminate +your rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses terminated +so long as such parties remain in full compliance. + +5. You are not required to accept this License, since you have not signed +it. However, nothing else grants you permission to modify or distribute the +Program or its derivative works. These actions are prohibited by law if you +do not accept this License. Therefore, by modifying or distributing the Program +(or any work based on the Program), you indicate your acceptance of this License +to do so, and all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + +6. Each time you redistribute the Program (or any work based on the Program), +the recipient automatically receives a license from the original licensor +to copy, distribute or modify the Program subject to these terms and conditions. 
+You may not impose any further restrictions on the recipients' exercise of +the rights granted herein. You are not responsible for enforcing compliance +by third parties to this License. + +7. If, as a consequence of a court judgment or allegation of patent infringement +or for any other reason (not limited to patent issues), conditions are imposed +on you (whether by court order, agreement or otherwise) that contradict the +conditions of this License, they do not excuse you from the conditions of +this License. If you cannot distribute so as to satisfy simultaneously your +obligations under this License and any other pertinent obligations, then as +a consequence you may not distribute the Program at all. For example, if a +patent license would not permit royalty-free redistribution of the Program +by all those who receive copies directly or indirectly through you, then the +only way you could satisfy both it and this License would be to refrain entirely +from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply and +the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any patents +or other property right claims or to contest validity of any such claims; +this section has the sole purpose of protecting the integrity of the free +software distribution system, which is implemented by public license practices. +Many people have made generous contributions to the wide range of software +distributed through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing to +distribute software through any other system and a licensee cannot impose +that choice. + +This section is intended to make thoroughly clear what is believed to be a +consequence of the rest of this License. + +8. If the distribution and/or use of the Program is restricted in certain +countries either by patents or by copyrighted interfaces, the original copyright +holder who places the Program under this License may add an explicit geographical +distribution limitation excluding those countries, so that distribution is +permitted only in or among countries not thus excluded. In such case, this +License incorporates the limitation as if written in the body of this License. + +9. The Free Software Foundation may publish revised and/or new versions of +the General Public License from time to time. Such new versions will be similar +in spirit to the present version, but may differ in detail to address new +problems or concerns. + +Each version is given a distinguishing version number. If the Program specifies +a version number of this License which applies to it and "any later version", +you have the option of following the terms and conditions either of that version +or of any later version published by the Free Software Foundation. If the +Program does not specify a version number of this License, you may choose +any version ever published by the Free Software Foundation. + +10. If you wish to incorporate parts of the Program into other free programs +whose distribution conditions are different, write to the author to ask for +permission. For software which is copyrighted by the Free Software Foundation, +write to the Free Software Foundation; we sometimes make exceptions for this. 
+
+Our decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing and reuse
+of software generally.
+
+ NO WARRANTY
+
+11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR
+THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE
+STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM
+"AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
+OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE
+OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA
+OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES
+OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH
+HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+END OF TERMS AND CONDITIONS
+
+How to Apply These Terms to Your New Programs
+
+If you develop a new program, and you want it to be of the greatest possible
+use to the public, the best way to achieve this is to make it free software
+which everyone can redistribute and change under these terms.
+
+To do so, attach the following notices to the program. It is safest to attach
+them to the start of each source file to most effectively convey the exclusion
+of warranty; and each file should have at least the "copyright" line and a
+pointer to where the full notice is found.
+
+<one line to give the program's name and an idea of what it does.>
+
+Copyright (C) <yyyy> <name of author>
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; either version 2 of the License, or (at your option) any later
+version.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
+Street, Fifth Floor, Boston, MA 02110-1301, USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this when
+it starts in an interactive mode:
+
+Gnomovision version 69, Copyright (C) year name of author Gnomovision comes
+with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software,
+and you are welcome to redistribute it under certain conditions; type `show
+c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may be
+called something other than `show w' and `show c'; they could even be mouse-clicks
+or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your school,
+if any, to sign a "copyright disclaimer" for the program, if necessary. Here
+is a sample; alter the names:
+
+Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision'
+(which makes passes at compilers) written by James Hacker.
+
+<signature of Ty Coon>, 1 April 1989 Ty Coon, President of Vice This General
+Public License does not permit incorporating your program into proprietary
+programs. If your program is a subroutine library, you may consider it more
+useful to permit linking proprietary applications with the library. If this
+is what you want to do, use the GNU Lesser General Public License instead
+of this License.
diff --git a/spider-cam/libcamera/LICENSES/GPL-2.0.txt b/spider-cam/libcamera/LICENSES/GPL-2.0.txt
new file mode 120000
index 0000000..0a87fbd
--- /dev/null
+++ b/spider-cam/libcamera/LICENSES/GPL-2.0.txt
@@ -0,0 +1 @@
+GPL-2.0-only.txt
\ No newline at end of file
diff --git a/spider-cam/libcamera/LICENSES/LGPL-2.1-or-later.txt b/spider-cam/libcamera/LICENSES/LGPL-2.1-or-later.txt
new file mode 100644
index 0000000..04bb156
--- /dev/null
+++ b/spider-cam/libcamera/LICENSES/LGPL-2.1-or-later.txt
@@ -0,0 +1,468 @@
+GNU LESSER GENERAL PUBLIC LICENSE
+
+Version 2.1, February 1999
+
+Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this license
+document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL. It also counts as the
+successor of the GNU Library Public License, version 2, hence the version
+number 2.1.]
+
+Preamble
+
+The licenses for most software are designed to take away your freedom to share
+and change it. By contrast, the GNU General Public Licenses are intended to
+guarantee your freedom to share and change free software--to make sure the
+software is free for all its users.
+
+This license, the Lesser General Public License, applies to some specially
+designated software packages--typically libraries--of the Free Software Foundation
+and other authors who decide to use it. You can use it too, but we suggest
+you first think carefully about whether this license or the ordinary General
+Public License is the better strategy to use in any particular case, based
+on the explanations below.
+
+When we speak of free software, we are referring to freedom of use, not price.
+Our General Public Licenses are designed to make sure that you have the freedom
+to distribute copies of free software (and charge for this service if you
+wish); that you receive source code or can get it if you want it; that you
+can change the software and use pieces of it in new free programs; and that
+you are informed that you can do these things.
+
+To protect your rights, we need to make restrictions that forbid distributors
+to deny you these rights or to ask you to surrender these rights. These restrictions
+translate to certain responsibilities for you if you distribute copies of
+the library or if you modify it.
+
+For example, if you distribute copies of the library, whether gratis or for
+a fee, you must give the recipients all the rights that we gave you. You must
+make sure that they, too, receive or can get the source code. 
If you link +other code with the library, you must provide complete object files to the +recipients, so that they can relink them with the library after making changes +to the library and recompiling it. And you must show them these terms so they +know their rights. + +We protect your rights with a two-step method: (1) we copyright the library, +and (2) we offer you this license, which gives you legal permission to copy, +distribute and/or modify the library. + +To protect each distributor, we want to make it very clear that there is no +warranty for the free library. Also, if the library is modified by someone +else and passed on, the recipients should know that what they have is not +the original version, so that the original author's reputation will not be +affected by problems that might be introduced by others. + +Finally, software patents pose a constant threat to the existence of any free +program. We wish to make sure that a company cannot effectively restrict the +users of a free program by obtaining a restrictive license from a patent holder. +Therefore, we insist that any patent license obtained for a version of the +library must be consistent with the full freedom of use specified in this +license. + +Most GNU software, including some libraries, is covered by the ordinary GNU +General Public License. This license, the GNU Lesser General Public License, +applies to certain designated libraries, and is quite different from the ordinary +General Public License. We use this license for certain libraries in order +to permit linking those libraries into non-free programs. + +When a program is linked with a library, whether statically or using a shared +library, the combination of the two is legally speaking a combined work, a +derivative of the original library. The ordinary General Public License therefore +permits such linking only if the entire combination fits its criteria of freedom. +The Lesser General Public License permits more lax criteria for linking other +code with the library. + +We call this license the "Lesser" General Public License because it does Less +to protect the user's freedom than the ordinary General Public License. It +also provides other free software developers Less of an advantage over competing +non-free programs. These disadvantages are the reason we use the ordinary +General Public License for many libraries. However, the Lesser license provides +advantages in certain special circumstances. + +For example, on rare occasions, there may be a special need to encourage the +widest possible use of a certain library, so that it becomes a de-facto standard. +To achieve this, non-free programs must be allowed to use the library. A more +frequent case is that a free library does the same job as widely used non-free +libraries. In this case, there is little to gain by limiting the free library +to free software only, so we use the Lesser General Public License. + +In other cases, permission to use a particular library in non-free programs +enables a greater number of people to use a large body of free software. For +example, permission to use the GNU C Library in non-free programs enables +many more people to use the whole GNU operating system, as well as its variant, +the GNU/Linux operating system. + +Although the Lesser General Public License is Less protective of the users' +freedom, it does ensure that the user of a program that is linked with the +Library has the freedom and the wherewithal to run that program using a modified +version of the Library. 
+ +The precise terms and conditions for copying, distribution and modification +follow. Pay close attention to the difference between a "work based on the +library" and a "work that uses the library". The former contains code derived +from the library, whereas the latter must be combined with the library in +order to run. + +TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + +0. This License Agreement applies to any software library or other program +which contains a notice placed by the copyright holder or other authorized +party saying it may be distributed under the terms of this Lesser General +Public License (also called "this License"). Each licensee is addressed as +"you". + +A "library" means a collection of software functions and/or data prepared +so as to be conveniently linked with application programs (which use some +of those functions and data) to form executables. + +The "Library", below, refers to any such software library or work which has +been distributed under these terms. A "work based on the Library" means either +the Library or any derivative work under copyright law: that is to say, a +work containing the Library or a portion of it, either verbatim or with modifications +and/or translated straightforwardly into another language. (Hereinafter, translation +is included without limitation in the term "modification".) + +"Source code" for a work means the preferred form of the work for making modifications +to it. For a library, complete source code means all the source code for all +modules it contains, plus any associated interface definition files, plus +the scripts used to control compilation and installation of the library. + +Activities other than copying, distribution and modification are not covered +by this License; they are outside its scope. The act of running a program +using the Library is not restricted, and output from such a program is covered +only if its contents constitute a work based on the Library (independent of +the use of the Library in a tool for writing it). Whether that is true depends +on what the Library does and what the program that uses the Library does. + +1. You may copy and distribute verbatim copies of the Library's complete source +code as you receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice and disclaimer +of warranty; keep intact all the notices that refer to this License and to +the absence of any warranty; and distribute a copy of this License along with +the Library. + +You may charge a fee for the physical act of transferring a copy, and you +may at your option offer warranty protection in exchange for a fee. + +2. You may modify your copy or copies of the Library or any portion of it, +thus forming a work based on the Library, and copy and distribute such modifications +or work under the terms of Section 1 above, provided that you also meet all +of these conditions: + + a) The modified work must itself be a software library. + +b) You must cause the files modified to carry prominent notices stating that +you changed the files and the date of any change. + +c) You must cause the whole of the work to be licensed at no charge to all +third parties under the terms of this License. 
+ +d) If a facility in the modified Library refers to a function or a table of +data to be supplied by an application program that uses the facility, other +than as an argument passed when the facility is invoked, then you must make +a good faith effort to ensure that, in the event an application does not supply +such function or table, the facility still operates, and performs whatever +part of its purpose remains meaningful. + +(For example, a function in a library to compute square roots has a purpose +that is entirely well-defined independent of the application. Therefore, Subsection +2d requires that any application-supplied function or table used by this function +must be optional: if the application does not supply it, the square root function +must still compute square roots.) + +These requirements apply to the modified work as a whole. If identifiable +sections of that work are not derived from the Library, and can be reasonably +considered independent and separate works in themselves, then this License, +and its terms, do not apply to those sections when you distribute them as +separate works. But when you distribute the same sections as part of a whole +which is a work based on the Library, the distribution of the whole must be +on the terms of this License, whose permissions for other licensees extend +to the entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest your +rights to work written entirely by you; rather, the intent is to exercise +the right to control the distribution of derivative or collective works based +on the Library. + +In addition, mere aggregation of another work not based on the Library with +the Library (or with a work based on the Library) on a volume of a storage +or distribution medium does not bring the other work under the scope of this +License. + +3. You may opt to apply the terms of the ordinary GNU General Public License +instead of this License to a given copy of the Library. To do this, you must +alter all the notices that refer to this License, so that they refer to the +ordinary GNU General Public License, version 2, instead of to this License. +(If a newer version than version 2 of the ordinary GNU General Public License +has appeared, then you can specify that version instead if you wish.) Do not +make any other change in these notices. + +Once this change is made in a given copy, it is irreversible for that copy, +so the ordinary GNU General Public License applies to all subsequent copies +and derivative works made from that copy. + +This option is useful when you wish to copy part of the code of the Library +into a program that is not a library. + +4. You may copy and distribute the Library (or a portion or derivative of +it, under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you accompany it with the complete corresponding +machine-readable source code, which must be distributed under the terms of +Sections 1 and 2 above on a medium customarily used for software interchange. + +If distribution of object code is made by offering access to copy from a designated +place, then offering equivalent access to copy the source code from the same +place satisfies the requirement to distribute the source code, even though +third parties are not compelled to copy the source along with the object code. + +5. 
A program that contains no derivative of any portion of the Library, but +is designed to work with the Library by being compiled or linked with it, +is called a "work that uses the Library". Such a work, in isolation, is not +a derivative work of the Library, and therefore falls outside the scope of +this License. + +However, linking a "work that uses the Library" with the Library creates an +executable that is a derivative of the Library (because it contains portions +of the Library), rather than a "work that uses the library". The executable +is therefore covered by this License. Section 6 states terms for distribution +of such executables. + +When a "work that uses the Library" uses material from a header file that +is part of the Library, the object code for the work may be a derivative work +of the Library even though the source code is not. Whether this is true is +especially significant if the work can be linked without the Library, or if +the work is itself a library. The threshold for this to be true is not precisely +defined by law. + +If such an object file uses only numerical parameters, data structure layouts +and accessors, and small macros and small inline functions (ten lines or less +in length), then the use of the object file is unrestricted, regardless of +whether it is legally a derivative work. (Executables containing this object +code plus portions of the Library will still fall under Section 6.) + +Otherwise, if the work is a derivative of the Library, you may distribute +the object code for the work under the terms of Section 6. Any executables +containing that work also fall under Section 6, whether or not they are linked +directly with the Library itself. + +6. As an exception to the Sections above, you may also combine or link a "work +that uses the Library" with the Library to produce a work containing portions +of the Library, and distribute that work under terms of your choice, provided +that the terms permit modification of the work for the customer's own use +and reverse engineering for debugging such modifications. + +You must give prominent notice with each copy of the work that the Library +is used in it and that the Library and its use are covered by this License. +You must supply a copy of this License. If the work during execution displays +copyright notices, you must include the copyright notice for the Library among +them, as well as a reference directing the user to the copy of this License. +Also, you must do one of these things: + +a) Accompany the work with the complete corresponding machine-readable source +code for the Library including whatever changes were used in the work (which +must be distributed under Sections 1 and 2 above); and, if the work is an +executable linked with the Library, with the complete machine-readable "work +that uses the Library", as object code and/or source code, so that the user +can modify the Library and then relink to produce a modified executable containing +the modified Library. (It is understood that the user who changes the contents +of definitions files in the Library will not necessarily be able to recompile +the application to use the modified definitions.) + +b) Use a suitable shared library mechanism for linking with the Library. 
A +suitable mechanism is one that (1) uses at run time a copy of the library +already present on the user's computer system, rather than copying library +functions into the executable, and (2) will operate properly with a modified +version of the library, if the user installs one, as long as the modified +version is interface-compatible with the version that the work was made with. + +c) Accompany the work with a written offer, valid for at least three years, +to give the same user the materials specified in Subsection 6a, above, for +a charge no more than the cost of performing this distribution. + +d) If distribution of the work is made by offering access to copy from a designated +place, offer equivalent access to copy the above specified materials from +the same place. + +e) Verify that the user has already received a copy of these materials or +that you have already sent this user a copy. + +For an executable, the required form of the "work that uses the Library" must +include any data and utility programs needed for reproducing the executable +from it. However, as a special exception, the materials to be distributed +need not include anything that is normally distributed (in either source or +binary form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component itself +accompanies the executable. + +It may happen that this requirement contradicts the license restrictions of +other proprietary libraries that do not normally accompany the operating system. +Such a contradiction means you cannot use both them and the Library together +in an executable that you distribute. + +7. You may place library facilities that are a work based on the Library side-by-side +in a single library together with other library facilities not covered by +this License, and distribute such a combined library, provided that the separate +distribution of the work based on the Library and of the other library facilities +is otherwise permitted, and provided that you do these two things: + +a) Accompany the combined library with a copy of the same work based on the +Library, uncombined with any other library facilities. This must be distributed +under the terms of the Sections above. + +b) Give prominent notice with the combined library of the fact that part of +it is a work based on the Library, and explaining where to find the accompanying +uncombined form of the same work. + +8. You may not copy, modify, sublicense, link with, or distribute the Library +except as expressly provided under this License. Any attempt otherwise to +copy, modify, sublicense, link with, or distribute the Library is void, and +will automatically terminate your rights under this License. However, parties +who have received copies, or rights, from you under this License will not +have their licenses terminated so long as such parties remain in full compliance. + +9. You are not required to accept this License, since you have not signed +it. However, nothing else grants you permission to modify or distribute the +Library or its derivative works. These actions are prohibited by law if you +do not accept this License. Therefore, by modifying or distributing the Library +(or any work based on the Library), you indicate your acceptance of this License +to do so, and all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + +10. 
Each time you redistribute the Library (or any work based on the Library), +the recipient automatically receives a license from the original licensor +to copy, distribute, link with or modify the Library subject to these terms +and conditions. You may not impose any further restrictions on the recipients' +exercise of the rights granted herein. You are not responsible for enforcing +compliance by third parties with this License. + +11. If, as a consequence of a court judgment or allegation of patent infringement +or for any other reason (not limited to patent issues), conditions are imposed +on you (whether by court order, agreement or otherwise) that contradict the +conditions of this License, they do not excuse you from the conditions of +this License. If you cannot distribute so as to satisfy simultaneously your +obligations under this License and any other pertinent obligations, then as +a consequence you may not distribute the Library at all. For example, if a +patent license would not permit royalty-free redistribution of the Library +by all those who receive copies directly or indirectly through you, then the +only way you could satisfy both it and this License would be to refrain entirely +from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any patents +or other property right claims or to contest validity of any such claims; +this section has the sole purpose of protecting the integrity of the free +software distribution system which is implemented by public license practices. +Many people have made generous contributions to the wide range of software +distributed through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing to +distribute software through any other system and a licensee cannot impose +that choice. + +This section is intended to make thoroughly clear what is believed to be a +consequence of the rest of this License. + +12. If the distribution and/or use of the Library is restricted in certain +countries either by patents or by copyrighted interfaces, the original copyright +holder who places the Library under this License may add an explicit geographical +distribution limitation excluding those countries, so that distribution is +permitted only in or among countries not thus excluded. In such case, this +License incorporates the limitation as if written in the body of this License. + +13. The Free Software Foundation may publish revised and/or new versions of +the Lesser General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to address +new problems or concerns. + +Each version is given a distinguishing version number. If the Library specifies +a version number of this License which applies to it and "any later version", +you have the option of following the terms and conditions either of that version +or of any later version published by the Free Software Foundation. If the +Library does not specify a license version number, you may choose any version +ever published by the Free Software Foundation. + +14. 
If you wish to incorporate parts of the Library into other free programs
+whose distribution conditions are incompatible with these, write to the author
+to ask for permission. For software which is copyrighted by the Free Software
+Foundation, write to the Free Software Foundation; we sometimes make exceptions
+for this. Our decision will be guided by the two goals of preserving the free
+status of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+ NO WARRANTY
+
+15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR
+THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE
+STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY
+"AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
+OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE
+THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE
+OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA
+OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES
+OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH
+HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+END OF TERMS AND CONDITIONS
+
+How to Apply These Terms to Your New Libraries
+
+If you develop a new library, and you want it to be of the greatest possible
+use to the public, we recommend making it free software that everyone can
+redistribute and change. You can do so by permitting redistribution under
+these terms (or, alternatively, under the terms of the ordinary General Public
+License).
+
+To apply these terms, attach the following notices to the library. It is safest
+to attach them to the start of each source file to most effectively convey
+the exclusion of warranty; and each file should have at least the "copyright"
+line and a pointer to where the full notice is found.
+
+<one line to give the library's name and an idea of what it does.>
+Copyright (C) <year> <name of author>
+
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the Free
+Software Foundation; either version 2.1 of the License, or (at your option)
+any later version.
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+details.
+
+You should have received a copy of the GNU Lesser General Public License along
+with this library; if not, write to the Free Software Foundation, Inc., 51
+Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your school,
+if any, to sign a "copyright disclaimer" for the library, if necessary.
Here
+is a sample; alter the names:
+
+Yoyodyne, Inc., hereby disclaims all copyright interest in
+
+the library `Frob' (a library for tweaking knobs) written
+
+by James Random Hacker.
+
+<signature of Ty Coon>, 1 April 1990
+
+Ty Coon, President of Vice
+
+That's all there is to it!
diff --git a/spider-cam/libcamera/LICENSES/Linux-syscall-note.txt b/spider-cam/libcamera/LICENSES/Linux-syscall-note.txt
new file mode 100644
index 0000000..82054e6
--- /dev/null
+++ b/spider-cam/libcamera/LICENSES/Linux-syscall-note.txt
@@ -0,0 +1,5 @@
+NOTE! This copyright does *not* cover user programs that use kernel services by normal system calls - this is merely considered normal use of the kernel, and does *not* fall under the heading of "derived work". Also note that the GPL below is copyrighted by the Free Software Foundation, but the instance of code that it refers to (the Linux kernel) is copyrighted by me and others who actually wrote it.
+
+Also note that the only valid version of the GPL as far as the kernel is concerned is _this_ particular version of the license (ie v2, not v2.2 or v3.x or whatever), unless explicitly otherwise stated.
+
+Linus Torvalds
diff --git a/spider-cam/libcamera/LICENSES/MIT.txt b/spider-cam/libcamera/LICENSES/MIT.txt
new file mode 100644
index 0000000..204b93d
--- /dev/null
+++ b/spider-cam/libcamera/LICENSES/MIT.txt
@@ -0,0 +1,19 @@
+MIT License Copyright (c) <year> <copyright holders>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is furnished
+to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice (including the next
+paragraph) shall be included in all copies or substantial portions of the
+Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
+OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/spider-cam/libcamera/README.rst b/spider-cam/libcamera/README.rst
new file mode 100644
index 0000000..1da7a3d
--- /dev/null
+++ b/spider-cam/libcamera/README.rst
@@ -0,0 +1,202 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+.. section-begin-libcamera
+
+===========
+ libcamera
+===========
+
+**A complex camera support library for Linux, Android, and ChromeOS**
+
+Cameras are complex devices that need heavy hardware image processing
+operations. Control of the processing is based on advanced algorithms that must
+run on a programmable processor. This has traditionally been implemented in a
+dedicated MCU in the camera, but in embedded devices algorithms have been moved
+to the main CPU to save cost. Blurring the boundary between camera devices and
+Linux often left the user with no other option than a vendor-specific
+closed-source solution.
+ +To address this problem the Linux media community has very recently started +collaboration with the industry to develop a camera stack that will be +open-source-friendly while still protecting vendor core IP. libcamera was born +out of that collaboration and will offer modern camera support to Linux-based +systems, including traditional Linux distributions, ChromeOS and Android. + +.. section-end-libcamera +.. section-begin-getting-started + +Getting Started +--------------- + +To fetch the sources, build and install: + +.. code:: + + git clone https://git.libcamera.org/libcamera/libcamera.git + cd libcamera + meson setup build + ninja -C build install + +Dependencies +~~~~~~~~~~~~ + +The following Debian/Ubuntu packages are required for building libcamera. +Other distributions may have differing package names: + +A C++ toolchain: [required] + Either {g++, clang} + +Meson Build system: [required] + meson (>= 0.60) ninja-build pkg-config + +for the libcamera core: [required] + libyaml-dev python3-yaml python3-ply python3-jinja2 + +for IPA module signing: [recommended] + Either libgnutls28-dev or libssl-dev, openssl + + Without IPA module signing, all IPA modules will be isolated in a + separate process. This adds an unnecessary extra overhead at runtime. + +for improved debugging: [optional] + libdw-dev libunwind-dev + + libdw and libunwind provide backtraces to help debugging assertion + failures. Their functions overlap, libdw provides the most detailed + information, and libunwind is not needed if both libdw and the glibc + backtrace() function are available. + +for device hotplug enumeration: [optional] + libudev-dev + +for documentation: [optional] + python3-sphinx doxygen graphviz texlive-latex-extra + +for gstreamer: [optional] + libgstreamer1.0-dev libgstreamer-plugins-base1.0-dev + +for Python bindings: [optional] + libpython3-dev pybind11-dev + +for cam: [optional] + libevent-dev is required to support cam, however the following + optional dependencies bring more functionality to the cam test + tool: + + - libdrm-dev: Enables the KMS sink + - libjpeg-dev: Enables MJPEG on the SDL sink + - libsdl2-dev: Enables the SDL sink + +for qcam: [optional] + libtiff-dev qtbase5-dev qttools5-dev-tools + +for tracing with lttng: [optional] + liblttng-ust-dev python3-jinja2 lttng-tools + +for android: [optional] + libexif-dev libjpeg-dev + +for Python bindings: [optional] + pybind11-dev + +for lc-compliance: [optional] + libevent-dev libgtest-dev + +for abi-compat.sh: [optional] + abi-compliance-checker + +Basic testing with cam utility +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The ``cam`` utility can be used for basic testing. You can list the cameras +detected on the system with ``cam -l``, and capture ten frames from the first +camera and save them to disk with ``cam -c 1 --capture=10 --file``. See +``cam -h`` for more information about the ``cam`` tool. + +In case of problems, a detailed debug log can be obtained from libcamera by +setting the ``LIBCAMERA_LOG_LEVELS`` environment variable: + +.. code:: + + :~$ LIBCAMERA_LOG_LEVELS=*:DEBUG cam -l + +Using GStreamer plugin +~~~~~~~~~~~~~~~~~~~~~~ + +To use the GStreamer plugin from the source tree, use the meson ``devenv`` +command. This will create a new shell instance with the ``GST_PLUGIN_PATH`` +environment set accordingly. + +.. code:: + + meson devenv -C build + +The debugging tool ``gst-launch-1.0`` can be used to construct a pipeline and +test it. 
The following pipeline will stream from the camera named "Camera 1"
+onto the OpenGL accelerated display element on your system.
+
+.. code::
+
+   gst-launch-1.0 libcamerasrc camera-name="Camera 1" ! queue ! glimagesink
+
+To show the first camera found you can omit the camera-name property, or you
+can list the cameras and their capabilities using:
+
+.. code::
+
+   gst-device-monitor-1.0 Video
+
+This will also show the supported stream sizes which can be manually selected
+if desired with a pipeline such as:
+
+.. code::
+
+   gst-launch-1.0 libcamerasrc ! 'video/x-raw,width=1280,height=720' ! \
+      queue ! glimagesink
+
+The libcamerasrc element has two log categories, named libcamera-provider (for
+the video device provider) and libcamerasrc (for the operation of the camera).
+All corresponding debug messages can be enabled by setting the ``GST_DEBUG``
+environment variable to ``libcamera*:7``.
+
+Presently, to prevent element negotiation failures, the colorimetry and
+framerate must be specified as part of the pipeline construction. For
+instance, to capture and encode as a JPEG stream and receive on another device
+the following example could be used as a starting point:
+
+.. code::
+
+   gst-launch-1.0 libcamerasrc ! \
+     video/x-raw,colorimetry=bt709,format=NV12,width=1280,height=720,framerate=30/1 ! \
+     queue ! jpegenc ! multipartmux ! \
+     tcpserversink host=0.0.0.0 port=5000
+
+The stream can then be received on another device over the network with:
+
+.. code::
+
+   gst-launch-1.0 tcpclientsrc host=$DEVICE_IP port=5000 ! \
+     multipartdemux ! jpegdec ! autovideosink
+
+.. section-end-getting-started
+
+Troubleshooting
+~~~~~~~~~~~~~~~
+
+Several users have reported issues with meson installation; the crux of the
+issue is a potential version mismatch between the version that root uses and
+the version that the normal user uses. When calling `ninja -C build`, ninja
+cannot find the build.ninja file. This is a snippet of the error message.
+
+::
+
+   ninja: Entering directory `build'
+   ninja: error: loading 'build.ninja': No such file or directory
+
+This can be solved in two ways:
+
+1. Don't install meson again if it is already installed system-wide.
+
+2. If a version of meson which is different from the system-wide version is
+   already installed, uninstall that meson using pip3, and install it again
+   without the --user argument.
diff --git a/spider-cam/libcamera/include/android/hardware/libhardware/include/hardware/camera3.h b/spider-cam/libcamera/include/android/hardware/libhardware/include/hardware/camera3.h
new file mode 100644
index 0000000..fd1e207
--- /dev/null
+++ b/spider-cam/libcamera/include/android/hardware/libhardware/include/hardware/camera3.h
@@ -0,0 +1,3269 @@
+/* SPDX-License-Identifier: Apache-2.0 */
+/*
+ * Copyright (C) 2013-2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_INCLUDE_CAMERA3_H
+#define ANDROID_INCLUDE_CAMERA3_H
+
+#include <system/camera_metadata.h>
+#include "camera_common.h"
+
+/**
+ * Camera device HAL 3.5 [ CAMERA_DEVICE_API_VERSION_3_5 ]
+ *
+ * This is the current recommended version of the camera device HAL.
+ *
+ * Supports the android.hardware.Camera API, and as of v3.2, the
+ * android.hardware.camera2 API as LIMITED or above hardware level.
+ *
+ * Camera devices that support this version of the HAL must return
+ * CAMERA_DEVICE_API_VERSION_3_5 in camera_device_t.common.version and in
+ * camera_info_t.device_version (from camera_module_t.get_camera_info).
+ *
+ * CAMERA_DEVICE_API_VERSION_3_3 and above:
+ * Camera modules that may contain version 3.3 or above devices must
+ * implement at least version 2.2 of the camera module interface (as defined
+ * by camera_module_t.common.module_api_version).
+ *
+ * CAMERA_DEVICE_API_VERSION_3_2:
+ * Camera modules that may contain version 3.2 devices must implement at
+ * least version 2.2 of the camera module interface (as defined by
+ * camera_module_t.common.module_api_version).
+ *
+ * <= CAMERA_DEVICE_API_VERSION_3_1:
+ * Camera modules that may contain version 3.1 (or 3.0) devices must
+ * implement at least version 2.0 of the camera module interface
+ * (as defined by camera_module_t.common.module_api_version).
+ *
+ * See camera_common.h for more versioning details.
+ *
+ * Documentation index:
+ * S1. Version history
+ * S2. Startup and operation sequencing
+ * S3. Operational modes
+ * S4. 3A modes and state machines
+ * S5. Cropping
+ * S6. Error management
+ * S7. Key Performance Indicator (KPI) glossary
+ * S8. Sample Use Cases
+ * S9. Notes on Controls and Metadata
+ * S10. Reprocessing flow and controls
+ */
+
+/**
+ * S1. Version history:
+ *
+ * 1.0: Initial Android camera HAL (Android 4.0) [camera.h]:
+ *
+ * - Converted from C++ CameraHardwareInterface abstraction layer.
+ *
+ * - Supports android.hardware.Camera API.
+ *
+ * 2.0: Initial release of expanded-capability HAL (Android 4.2) [camera2.h]:
+ *
+ * - Sufficient for implementing existing android.hardware.Camera API.
+ *
+ * - Allows for ZSL queue in camera service layer
+ *
+ * - Not tested for any new features such as manual capture control, Bayer RAW
+ * capture, reprocessing of RAW data.
+ *
+ * 3.0: First revision of expanded-capability HAL:
+ *
+ * - Major version change since the ABI is completely different. No change to
+ * the required hardware capabilities or operational model from 2.0.
+ *
+ * - Reworked input request and stream queue interfaces: Framework calls into
+ * HAL with next request and stream buffers already dequeued. Sync framework
+ * support is included, necessary for efficient implementations.
+ *
+ * - Moved triggers into requests, most notifications into results.
+ *
+ * - Consolidated all callbacks into framework into one structure, and all
+ * setup methods into a single initialize() call.
+ *
+ * - Made stream configuration into a single call to simplify stream
+ * management. Bidirectional streams replace STREAM_FROM_STREAM construct.
+ *
+ * - Limited mode semantics for older/limited hardware devices.
+ *
+ * 3.1: Minor revision of expanded-capability HAL:
+ *
+ * - configure_streams passes consumer usage flags to the HAL.
+ *
+ * - flush call to drop all in-flight requests/buffers as fast as possible.
+ *
+ * 3.2: Minor revision of expanded-capability HAL:
+ *
+ * - Deprecates get_metadata_vendor_tag_ops. Please use get_vendor_tag_ops
+ * in camera_common.h instead.
+ *
+ * - register_stream_buffers deprecated. All gralloc buffers provided
+ * by framework to HAL in process_capture_request may be new at any time.
+ *
+ * - add partial result support. process_capture_result may be called
+ * multiple times with a subset of the available result before the full
+ * result is available.
+ *
+ * - add manual template to camera3_request_template. The applications may
+ * use this template to control the capture settings directly.
+ *
+ * - Rework the bidirectional and input stream specifications.
+ *
+ * - change the input buffer return path. The buffer is returned in
+ * process_capture_result instead of process_capture_request.
+ *
+ * 3.3: Minor revision of expanded-capability HAL:
+ *
+ * - OPAQUE and YUV reprocessing API updates.
+ *
+ * - Basic support for depth output buffers.
+ *
+ * - Addition of data_space field to camera3_stream_t.
+ *
+ * - Addition of rotation field to camera3_stream_t.
+ *
+ * - Addition of camera3 stream configuration operation mode to camera3_stream_configuration_t
+ *
+ * 3.4: Minor additions to supported metadata and changes to data_space support
+ *
+ * - Add ANDROID_SENSOR_OPAQUE_RAW_SIZE static metadata as mandatory if
+ * RAW_OPAQUE format is supported.
+ *
+ * - Add ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST_RANGE static metadata as
+ * mandatory if any RAW format is supported
+ *
+ * - Switch camera3_stream_t data_space field to a more flexible definition,
+ * using the version 0 definition of dataspace encoding.
+ *
+ * - General metadata additions which are available to use for HALv3.2 or
+ * newer:
+ * - ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_3
+ * - ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST
+ * - ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST_RANGE
+ * - ANDROID_SENSOR_DYNAMIC_BLACK_LEVEL
+ * - ANDROID_SENSOR_DYNAMIC_WHITE_LEVEL
+ * - ANDROID_SENSOR_OPAQUE_RAW_SIZE
+ * - ANDROID_SENSOR_OPTICAL_BLACK_REGIONS
+ *
+ * 3.5: Minor revisions to support session parameters and logical multi camera:
+ *
+ * - Add ANDROID_REQUEST_AVAILABLE_SESSION_KEYS static metadata, which is
+ * optional for implementations that want to support session parameters. If support is
+ * needed, then the HAL should populate the list with all available capture request keys
+ * that can cause severe processing delays when modified by client. Typical examples
+ * include parameters that require time-consuming HW re-configuration or internal camera
+ * pipeline update.
+ *
+ * - Add a session parameter field to camera3_stream_configuration which can be populated
+ * by clients with initial values for the keys found in ANDROID_REQUEST_AVAILABLE_SESSION_KEYS.
+ *
+ * - Metadata additions for logical multi camera capability:
+ * - ANDROID_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA
+ * - ANDROID_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS
+ * - ANDROID_LOGICAL_MULTI_CAMERA_SYNC_TYPE
+ *
+ * - Add physical camera id field in camera3_stream, so that for a logical
+ * multi camera, the application has the option to specify which physical camera
+ * a particular stream is configured on.
+ *
+ * - Add physical camera id and settings field in camera3_capture_request, so that
+ * for a logical multi camera, the application has the option to specify individual
+ * settings for a particular physical device.
+ *
+ */
+
+/**
+ * S2. Startup and general expected operation sequence:
+ *
+ * 1. Framework calls camera_module_t->common.open(), which returns a
+ * hardware_device_t structure.
+ *
+ * 2.
Framework inspects the hardware_device_t->version field, and instantiates + * the appropriate handler for that version of the camera hardware device. In + * case the version is CAMERA_DEVICE_API_VERSION_3_0, the device is cast to + * a camera3_device_t. + * + * 3. Framework calls camera3_device_t->ops->initialize() with the framework + * callback function pointers. This will only be called this one time after + * open(), before any other functions in the ops structure are called. + * + * 4. The framework calls camera3_device_t->ops->configure_streams() with a list + * of input/output streams to the HAL device. + * + * 5. <= CAMERA_DEVICE_API_VERSION_3_1: + * + * The framework allocates gralloc buffers and calls + * camera3_device_t->ops->register_stream_buffers() for at least one of the + * output streams listed in configure_streams. The same stream is registered + * only once. + * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * camera3_device_t->ops->register_stream_buffers() is not called and must + * be NULL. + * + * 6. The framework requests default settings for some number of use cases with + * calls to camera3_device_t->ops->construct_default_request_settings(). This + * may occur any time after step 3. + * + * 7. The framework constructs and sends the first capture request to the HAL, + * with settings based on one of the sets of default settings, and with at + * least one output stream, which has been registered earlier by the + * framework. This is sent to the HAL with + * camera3_device_t->ops->process_capture_request(). The HAL must block the + * return of this call until it is ready for the next request to be sent. + * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * The buffer_handle_t provided in the camera3_stream_buffer_t array + * in the camera3_capture_request_t may be new and never-before-seen + * by the HAL on any given new request. + * + * 8. The framework continues to submit requests, and call + * construct_default_request_settings to get default settings buffers for + * other use cases. + * + * <= CAMERA_DEVICE_API_VERSION_3_1: + * + * The framework may call register_stream_buffers() at this time for + * not-yet-registered streams. + * + * 9. When the capture of a request begins (sensor starts exposing for the + * capture) or processing a reprocess request begins, the HAL + * calls camera3_callback_ops_t->notify() with the SHUTTER event, including + * the frame number and the timestamp for start of exposure. For a reprocess + * request, the timestamp must be the start of exposure of the input image + * which can be looked up with android.sensor.timestamp from + * camera3_capture_request_t.settings when process_capture_request() is + * called. + * + * <= CAMERA_DEVICE_API_VERSION_3_1: + * + * This notify call must be made before the first call to + * process_capture_result() for that frame number. + * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * The camera3_callback_ops_t->notify() call with the SHUTTER event should + * be made as early as possible since the framework will be unable to + * deliver gralloc buffers to the application layer (for that frame) until + * it has a valid timestamp for the start of exposure (or the input image's + * start of exposure for a reprocess request). + * + * Both partial metadata results and the gralloc buffers may be sent to the + * framework at any time before or after the SHUTTER event. + * + * 10. 
After some pipeline delay, the HAL begins to return completed captures to + * the framework with camera3_callback_ops_t->process_capture_result(). These + * are returned in the same order as the requests were submitted. Multiple + * requests can be in flight at once, depending on the pipeline depth of the + * camera HAL device. + * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * Once a buffer is returned by process_capture_result as part of the + * camera3_stream_buffer_t array, and the fence specified by release_fence + * has been signaled (this is a no-op for -1 fences), the ownership of that + * buffer is considered to be transferred back to the framework. After that, + * the HAL must no longer retain that particular buffer, and the + * framework may clean up the memory for it immediately. + * + * process_capture_result may be called multiple times for a single frame, + * each time with a new disjoint piece of metadata and/or set of gralloc + * buffers. The framework will accumulate these partial metadata results + * into one result. + * + * In particular, it is legal for a process_capture_result to be called + * simultaneously for both a frame N and a frame N+1 as long as the + * above rule holds for gralloc buffers (both input and output). + * + * 11. After some time, the framework may stop submitting new requests, wait for + * the existing captures to complete (all buffers filled, all results + * returned), and then call configure_streams() again. This resets the camera + * hardware and pipeline for a new set of input/output streams. Some streams + * may be reused from the previous configuration; if these streams' buffers + * had already been registered with the HAL, they will not be registered + * again. The framework then continues from step 7, if at least one + * registered output stream remains (otherwise, step 5 is required first). + * + * 12. Alternatively, the framework may call camera3_device_t->common->close() + * to end the camera session. This may be called at any time when no other + * calls from the framework are active, although the call may block until all + * in-flight captures have completed (all results returned, all buffers + * filled). After the close call returns, no more calls to the + * camera3_callback_ops_t functions are allowed from the HAL. Once the + * close() call is underway, the framework may not call any other HAL device + * functions. + * + * 13. In case of an error or other asynchronous event, the HAL must call + * camera3_callback_ops_t->notify() with the appropriate error/event + * message. After returning from a fatal device-wide error notification, the + * HAL should act as if close() had been called on it. However, the HAL must + * either cancel or complete all outstanding captures before calling + * notify(), so that once notify() is called with a fatal error, the + * framework will not receive further callbacks from the device. Methods + * besides close() should return -ENODEV or NULL after the notify() method + * returns from a fatal error message. + */ + +/** + * S3. Operational modes: + * + * The camera 3 HAL device can implement one of two possible operational modes; + * limited and full. Full support is expected from new higher-end + * devices. Limited mode has hardware requirements roughly in line with those + * for a camera HAL device v1 implementation, and is expected from older or + * inexpensive devices. Full is a strict superset of limited, and they share the + * same essential operational flow, as documented above. 
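+ *
+ * Editorial sketch (not part of the original header): a minimal, assumed
+ * rendering of the essential flow of S2 (steps 1-13) for a
+ * >= CAMERA_DEVICE_API_VERSION_3_2 device. The names module, id,
+ * callback_ops, preview_stream and preview_buffer are caller-provided
+ * placeholders, and all error checking is omitted:
+ *
+ *     hw_device_t *hw;
+ *     module->common.methods->open(&module->common, id, &hw);    // steps 1-2
+ *     camera3_device_t *dev = (camera3_device_t *)hw;
+ *     dev->ops->initialize(dev, &callback_ops);                  // step 3
+ *
+ *     camera3_stream_t *streams[] = { &preview_stream };
+ *     camera3_stream_configuration_t config = {
+ *         .num_streams = 1,
+ *         .streams = streams,
+ *         .operation_mode = CAMERA3_STREAM_CONFIGURATION_NORMAL_MODE,
+ *     };
+ *     dev->ops->configure_streams(dev, &config);                 // step 4
+ *
+ *     const camera_metadata_t *settings =
+ *         dev->ops->construct_default_request_settings(
+ *             dev, CAMERA3_TEMPLATE_PREVIEW);                    // step 6
+ *     camera3_capture_request_t request = {
+ *         .frame_number = 0,
+ *         .settings = settings,
+ *         .num_output_buffers = 1,
+ *         .output_buffers = &preview_buffer,
+ *     };
+ *     dev->ops->process_capture_request(dev, &request);          // steps 7-8
+ *
+ *     // The SHUTTER notify() and process_capture_result() callbacks
+ *     // arrive asynchronously via callback_ops (steps 9-10).
+ *     dev->common.close(&dev->common);                           // step 12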
+ * + * The HAL must indicate its level of support with the + * android.info.supportedHardwareLevel static metadata entry, with 0 indicating + * limited mode, and 1 indicating full mode support. + * + * Roughly speaking, limited-mode devices do not allow for application control + * of capture settings (3A control only), high-rate capture of high-resolution + * images, raw sensor readout, or support for YUV output streams above maximum + * recording resolution (JPEG only for large images). + * + * ** Details of limited mode behavior: + * + * - Limited-mode devices do not need to implement accurate synchronization + * between capture request settings and the actual image data + * captured. Instead, changes to settings may take effect some time in the + * future, and possibly not for the same output frame for each settings + * entry. Rapid changes in settings may result in some settings never being + * used for a capture. However, captures that include high-resolution output + * buffers ( > 1080p ) have to use the settings as specified (but see below + * for processing rate). + * + * - Limited-mode devices do not need to support most of the + * settings/result/static info metadata. Specifically, only the following settings + * are expected to be consumed or produced by a limited-mode HAL device: + * + * android.control.aeAntibandingMode (controls and dynamic) + * android.control.aeExposureCompensation (controls and dynamic) + * android.control.aeLock (controls and dynamic) + * android.control.aeMode (controls and dynamic) + * android.control.aeRegions (controls and dynamic) + * android.control.aeTargetFpsRange (controls and dynamic) + * android.control.aePrecaptureTrigger (controls and dynamic) + * android.control.afMode (controls and dynamic) + * android.control.afRegions (controls and dynamic) + * android.control.awbLock (controls and dynamic) + * android.control.awbMode (controls and dynamic) + * android.control.awbRegions (controls and dynamic) + * android.control.captureIntent (controls and dynamic) + * android.control.effectMode (controls and dynamic) + * android.control.mode (controls and dynamic) + * android.control.sceneMode (controls and dynamic) + * android.control.videoStabilizationMode (controls and dynamic) + * android.control.aeAvailableAntibandingModes (static) + * android.control.aeAvailableModes (static) + * android.control.aeAvailableTargetFpsRanges (static) + * android.control.aeCompensationRange (static) + * android.control.aeCompensationStep (static) + * android.control.afAvailableModes (static) + * android.control.availableEffects (static) + * android.control.availableSceneModes (static) + * android.control.availableVideoStabilizationModes (static) + * android.control.awbAvailableModes (static) + * android.control.maxRegions (static) + * android.control.sceneModeOverrides (static) + * android.control.aeState (dynamic) + * android.control.afState (dynamic) + * android.control.awbState (dynamic) + * + * android.flash.mode (controls and dynamic) + * android.flash.info.available (static) + * + * android.info.supportedHardwareLevel (static) + * + * android.jpeg.gpsCoordinates (controls and dynamic) + * android.jpeg.gpsProcessingMethod (controls and dynamic) + * android.jpeg.gpsTimestamp (controls and dynamic) + * android.jpeg.orientation (controls and dynamic) + * android.jpeg.quality (controls and dynamic) + * android.jpeg.thumbnailQuality (controls and dynamic) + * android.jpeg.thumbnailSize (controls and dynamic) + * android.jpeg.availableThumbnailSizes (static) + * 
android.jpeg.maxSize (static)
+ *
+ * android.lens.info.minimumFocusDistance (static)
+ *
+ * android.request.id (controls and dynamic)
+ *
+ * android.scaler.cropRegion (controls and dynamic)
+ * android.scaler.availableStreamConfigurations (static)
+ * android.scaler.availableMinFrameDurations (static)
+ * android.scaler.availableStallDurations (static)
+ * android.scaler.availableMaxDigitalZoom (static)
+ * android.scaler.maxDigitalZoom (static)
+ * android.scaler.croppingType (static)
+ *
+ * android.sensor.orientation (static)
+ * android.sensor.timestamp (dynamic)
+ *
+ * android.statistics.faceDetectMode (controls and dynamic)
+ * android.statistics.info.availableFaceDetectModes (static)
+ * android.statistics.faceIds (dynamic)
+ * android.statistics.faceLandmarks (dynamic)
+ * android.statistics.faceRectangles (dynamic)
+ * android.statistics.faceScores (dynamic)
+ *
+ * android.sync.frameNumber (dynamic)
+ * android.sync.maxLatency (static)
+ *
+ * - Captures in limited mode that include high-resolution (> 1080p) output
+ * buffers may block in process_capture_request() until all the output buffers
+ * have been filled. A full-mode HAL device must process sequences of
+ * high-resolution requests at the rate indicated in the static metadata for
+ * that pixel format. The HAL must still call process_capture_result() to
+ * provide the output; the framework must simply be prepared for
+ * process_capture_request() to block until after process_capture_result() for
+ * that request completes for high-resolution captures for limited-mode
+ * devices.
+ *
+ * - Full-mode devices must support the additional capabilities below:
+ * - 30fps at maximum resolution is preferred, more than 20fps is required.
+ * - Per frame control (android.sync.maxLatency == PER_FRAME_CONTROL).
+ * - Sensor manual control metadata. See MANUAL_SENSOR defined in
+ * android.request.availableCapabilities.
+ * - Post-processing manual control metadata. See MANUAL_POST_PROCESSING defined
+ * in android.request.availableCapabilities.
+ *
+ */
+
+/**
+ * S4. 3A modes and state machines:
+ *
+ * While the actual 3A algorithms are up to the HAL implementation, a high-level
+ * state machine description is defined by the HAL interface, to allow the HAL
+ * device and the framework to communicate about the current state of 3A, and to
+ * trigger 3A events.
+ *
+ * When the device is opened, all the individual 3A states must be
+ * STATE_INACTIVE. Stream configuration does not reset 3A. For example, locked
+ * focus must be maintained across the configure() call.
+ *
+ * Triggering a 3A action involves simply setting the relevant trigger entry in
+ * the settings for the next request to indicate start of trigger. For example,
+ * the trigger for starting an autofocus scan is setting the entry
+ * ANDROID_CONTROL_AF_TRIGGER to ANDROID_CONTROL_AF_TRIGGER_START for one
+ * request, and cancelling an autofocus scan is triggered by setting
+ * ANDROID_CONTROL_AF_TRIGGER to ANDROID_CONTROL_AF_TRIGGER_CANCEL. Otherwise,
+ * the entry will not exist, or be set to ANDROID_CONTROL_AF_TRIGGER_IDLE. Each
+ * request with a trigger entry set to a non-IDLE value will be treated as an
+ * independent triggering event.
+ *
+ * At the top level, 3A is controlled by the ANDROID_CONTROL_MODE setting, which
+ * selects between no 3A (ANDROID_CONTROL_MODE_OFF), normal AUTO mode
+ * (ANDROID_CONTROL_MODE_AUTO), and using the scene mode setting
+ * (ANDROID_CONTROL_USE_SCENE_MODE).
+ *
+ * - In OFF mode, each of the individual AE/AF/AWB modes are effectively OFF,
+ * and none of the capture controls may be overridden by the 3A routines.
+ *
+ * - In AUTO mode, auto-focus, auto-exposure, and auto-whitebalance all run
+ * their own independent algorithms, and have their own mode, state, and
+ * trigger metadata entries, as listed in the next section.
+ *
+ * - In USE_SCENE_MODE, the value of the ANDROID_CONTROL_SCENE_MODE entry must
+ * be used to determine the behavior of 3A routines. In SCENE_MODEs other than
+ * FACE_PRIORITY, the HAL must override the values of
+ * ANDROID_CONTROL_AE/AWB/AF_MODE to be the mode it prefers for the selected
+ * SCENE_MODE. For example, the HAL may prefer SCENE_MODE_NIGHT to use
+ * CONTINUOUS_FOCUS AF mode. Any user selection of AE/AWB/AF_MODE while a
+ * scene mode is active must be ignored for these scene modes.
+ *
+ * - For SCENE_MODE_FACE_PRIORITY, the AE/AWB/AF_MODE controls work as in
+ * ANDROID_CONTROL_MODE_AUTO, but the 3A routines must bias toward metering
+ * and focusing on any detected faces in the scene.
+ *
+ * S4.1. Auto-focus settings and result entries:
+ *
+ * Main metadata entries:
+ *
+ * ANDROID_CONTROL_AF_MODE: Control for selecting the current autofocus
+ * mode. Set by the framework in the request settings.
+ *
+ * AF_MODE_OFF: AF is disabled; the framework/app directly controls lens
+ * position.
+ *
+ * AF_MODE_AUTO: Single-sweep autofocus. No lens movement unless AF is
+ * triggered.
+ *
+ * AF_MODE_MACRO: Single-sweep up-close autofocus. No lens movement unless
+ * AF is triggered.
+ *
+ * AF_MODE_CONTINUOUS_VIDEO: Smooth continuous focusing, for recording
+ * video. Triggering immediately locks focus in current
+ * position. Canceling resumes continuous focusing.
+ *
+ * AF_MODE_CONTINUOUS_PICTURE: Fast continuous focusing, for
+ * zero-shutter-lag still capture. Triggering locks focus once currently
+ * active sweep concludes. Canceling resumes continuous focusing.
+ *
+ * AF_MODE_EDOF: Advanced extended depth of field focusing. There is no
+ * autofocus scan, so triggering one or canceling one has no effect.
+ * Images are focused automatically by the HAL.
+ *
+ * ANDROID_CONTROL_AF_STATE: Dynamic metadata describing the current AF
+ * algorithm state, reported by the HAL in the result metadata.
+ *
+ * AF_STATE_INACTIVE: No focusing has been done, or algorithm was
+ * reset. Lens is not moving. Always the state for MODE_OFF or MODE_EDOF.
+ * When the device is opened, it must start in this state.
+ *
+ * AF_STATE_PASSIVE_SCAN: A continuous focus algorithm is currently scanning
+ * for good focus. The lens is moving.
+ *
+ * AF_STATE_PASSIVE_FOCUSED: A continuous focus algorithm believes it is
+ * well focused. The lens is not moving. The HAL may spontaneously leave
+ * this state.
+ *
+ * AF_STATE_PASSIVE_UNFOCUSED: A continuous focus algorithm believes it is
+ * not well focused. The lens is not moving. The HAL may spontaneously
+ * leave this state.
+ *
+ * AF_STATE_ACTIVE_SCAN: A scan triggered by the user is underway.
+ *
+ * AF_STATE_FOCUSED_LOCKED: The AF algorithm believes it is focused. The
+ * lens is not moving.
+ *
+ * AF_STATE_NOT_FOCUSED_LOCKED: The AF algorithm has been unable to
+ * focus. The lens is not moving.
+ *
+ * ANDROID_CONTROL_AF_TRIGGER: Control for starting an autofocus scan, the
+ * meaning of which is mode- and state-dependent. Set by the framework in
+ * the request settings.
+ *
+ * AF_TRIGGER_IDLE: No current trigger.
+ *
+ * AF_TRIGGER_START: Trigger start of AF scan. Effect is mode and state
+ * dependent.
+ *
+ * AF_TRIGGER_CANCEL: Cancel current AF scan if any, and reset algorithm to
+ * default.
+ *
+ * Additional metadata entries:
+ *
+ * ANDROID_CONTROL_AF_REGIONS: Control for selecting the regions of the FOV
+ * that should be used to determine good focus. This applies to all AF
+ * modes that scan for focus. Set by the framework in the request
+ * settings.
+ *
+ * S4.2. Auto-exposure settings and result entries:
+ *
+ * Main metadata entries:
+ *
+ * ANDROID_CONTROL_AE_MODE: Control for selecting the current auto-exposure
+ * mode. Set by the framework in the request settings.
+ *
+ * AE_MODE_OFF: Autoexposure is disabled; the user controls exposure, gain,
+ * frame duration, and flash.
+ *
+ * AE_MODE_ON: Standard autoexposure, with flash control disabled. User may
+ * set flash to fire or to torch mode.
+ *
+ * AE_MODE_ON_AUTO_FLASH: Standard autoexposure, with flash on at HAL's
+ * discretion for precapture and still capture. User control of flash
+ * disabled.
+ *
+ * AE_MODE_ON_ALWAYS_FLASH: Standard autoexposure, with flash always fired
+ * for capture, and at HAL's discretion for precapture. User control of
+ * flash disabled.
+ *
+ * AE_MODE_ON_AUTO_FLASH_REDEYE: Standard autoexposure, with flash on at
+ * HAL's discretion for precapture and still capture. Use a flash burst
+ * at end of precapture sequence to reduce redeye in the final
+ * picture. User control of flash disabled.
+ *
+ * ANDROID_CONTROL_AE_STATE: Dynamic metadata describing the current AE
+ * algorithm state, reported by the HAL in the result metadata.
+ *
+ * AE_STATE_INACTIVE: Initial AE state after mode switch. When the device is
+ * opened, it must start in this state.
+ *
+ * AE_STATE_SEARCHING: AE is not converged to a good value, and is adjusting
+ * exposure parameters.
+ *
+ * AE_STATE_CONVERGED: AE has found good exposure values for the current
+ * scene, and the exposure parameters are not changing. HAL may
+ * spontaneously leave this state to search for a better solution.
+ *
+ * AE_STATE_LOCKED: AE has been locked with the AE_LOCK control. Exposure
+ * values are not changing.
+ *
+ * AE_STATE_FLASH_REQUIRED: The HAL has converged exposure, but believes
+ * flash is required for a sufficiently bright picture. Used for
+ * determining if a zero-shutter-lag frame can be used.
+ *
+ * AE_STATE_PRECAPTURE: The HAL is in the middle of a precapture
+ * sequence. Depending on AE mode, this mode may involve firing the
+ * flash for metering, or a burst of flash pulses for redeye reduction.
+ *
+ * ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER: Control for starting a metering
+ * sequence before capturing a high-quality image. Set by the framework in
+ * the request settings.
+ *
+ * PRECAPTURE_TRIGGER_IDLE: No current trigger.
+ *
+ * PRECAPTURE_TRIGGER_START: Start a precapture sequence. The HAL should
+ * use the subsequent requests to measure good exposure/white balance
+ * for an upcoming high-resolution capture.
+ *
+ * Additional metadata entries:
+ *
+ * ANDROID_CONTROL_AE_LOCK: Control for locking AE controls to their current
+ * values
+ *
+ * ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION: Control for adjusting AE
+ * algorithm target brightness point.
+ *
+ * ANDROID_CONTROL_AE_TARGET_FPS_RANGE: Control for selecting the target frame
+ * rate range for the AE algorithm. The AE routine cannot change the frame
+ * rate to be outside these bounds.
+ *
+ * ANDROID_CONTROL_AE_REGIONS: Control for selecting the regions of the FOV
+ * that should be used to determine good exposure levels. This applies to
+ * all AE modes besides OFF.
+ *
+ * S4.3. Auto-whitebalance settings and result entries:
+ *
+ * Main metadata entries:
+ *
+ * ANDROID_CONTROL_AWB_MODE: Control for selecting the current white-balance
+ * mode.
+ *
+ * AWB_MODE_OFF: Auto-whitebalance is disabled. User controls color matrix.
+ *
+ * AWB_MODE_AUTO: Automatic white balance is enabled; 3A controls color
+ * transform, possibly using more complex transforms than a simple
+ * matrix.
+ *
+ * AWB_MODE_INCANDESCENT: Fixed white balance settings good for indoor
+ * incandescent (tungsten) lighting, roughly 2700K.
+ *
+ * AWB_MODE_FLUORESCENT: Fixed white balance settings good for fluorescent
+ * lighting, roughly 5000K.
+ *
+ * AWB_MODE_WARM_FLUORESCENT: Fixed white balance settings good for
+ * fluorescent lighting, roughly 3000K.
+ *
+ * AWB_MODE_DAYLIGHT: Fixed white balance settings good for daylight,
+ * roughly 5500K.
+ *
+ * AWB_MODE_CLOUDY_DAYLIGHT: Fixed white balance settings good for clouded
+ * daylight, roughly 6500K.
+ *
+ * AWB_MODE_TWILIGHT: Fixed white balance settings good for
+ * near-sunset/sunrise, roughly 15000K.
+ *
+ * AWB_MODE_SHADE: Fixed white balance settings good for areas indirectly
+ * lit by the sun, roughly 7500K.
+ *
+ * ANDROID_CONTROL_AWB_STATE: Dynamic metadata describing the current AWB
+ * algorithm state, reported by the HAL in the result metadata.
+ *
+ * AWB_STATE_INACTIVE: Initial AWB state after mode switch. When the device
+ * is opened, it must start in this state.
+ *
+ * AWB_STATE_SEARCHING: AWB is not converged to a good value, and is
+ * changing color adjustment parameters.
+ *
+ * AWB_STATE_CONVERGED: AWB has found good color adjustment values for the
+ * current scene, and the parameters are not changing. HAL may
+ * spontaneously leave this state to search for a better solution.
+ *
+ * AWB_STATE_LOCKED: AWB has been locked with the AWB_LOCK control. Color
+ * adjustment values are not changing.
+ *
+ * Additional metadata entries:
+ *
+ * ANDROID_CONTROL_AWB_LOCK: Control for locking AWB color adjustments to
+ * their current values.
+ *
+ * ANDROID_CONTROL_AWB_REGIONS: Control for selecting the regions of the FOV
+ * that should be used to determine good color balance. This applies only
+ * to auto-WB mode.
+ *
+ * S4.4. General state machine transition notes
+ *
+ * Switching between AF, AE, or AWB modes always resets the algorithm's state
+ * to INACTIVE. Similarly, switching the value of CONTROL_MODE, or of
+ * CONTROL_SCENE_MODE if CONTROL_MODE == USE_SCENE_MODE, resets all the
+ * algorithm states to INACTIVE.
+ *
+ * The tables below are per-mode.
+ *
+ * S4.5. AF state machines
+ *
+ * when enabling AF or changing AF mode
+ *| state | trans. cause | new state | notes |
+ *+--------------------+---------------+--------------------+------------------+
+ *| Any | AF mode change| INACTIVE | |
+ *+--------------------+---------------+--------------------+------------------+
+ *
+ * mode = AF_MODE_OFF or AF_MODE_EDOF
+ *| state | trans. cause | new state | notes |
+ *+--------------------+---------------+--------------------+------------------+
+ *| INACTIVE | | INACTIVE | Never changes |
+ *+--------------------+---------------+--------------------+------------------+
+ *
+ * mode = AF_MODE_AUTO or AF_MODE_MACRO
+ *| state | trans.
cause | new state | notes |
+ *+--------------------+---------------+--------------------+------------------+
+ *| INACTIVE | AF_TRIGGER | ACTIVE_SCAN | Start AF sweep |
+ *| | | | Lens now moving |
+ *+--------------------+---------------+--------------------+------------------+
+ *| ACTIVE_SCAN | AF sweep done | FOCUSED_LOCKED | If AF successful |
+ *| | | | Lens now locked |
+ *+--------------------+---------------+--------------------+------------------+
+ *| ACTIVE_SCAN | AF sweep done | NOT_FOCUSED_LOCKED | If AF unsuccessful|
+ *| | | | Lens now locked |
+ *+--------------------+---------------+--------------------+------------------+
+ *| ACTIVE_SCAN | AF_CANCEL | INACTIVE | Cancel/reset AF |
+ *| | | | Lens now locked |
+ *+--------------------+---------------+--------------------+------------------+
+ *| FOCUSED_LOCKED | AF_CANCEL | INACTIVE | Cancel/reset AF |
+ *+--------------------+---------------+--------------------+------------------+
+ *| FOCUSED_LOCKED | AF_TRIGGER | ACTIVE_SCAN | Start new sweep |
+ *| | | | Lens now moving |
+ *+--------------------+---------------+--------------------+------------------+
+ *| NOT_FOCUSED_LOCKED | AF_CANCEL | INACTIVE | Cancel/reset AF |
+ *+--------------------+---------------+--------------------+------------------+
+ *| NOT_FOCUSED_LOCKED | AF_TRIGGER | ACTIVE_SCAN | Start new sweep |
+ *| | | | Lens now moving |
+ *+--------------------+---------------+--------------------+------------------+
+ *| All states | mode change | INACTIVE | |
+ *+--------------------+---------------+--------------------+------------------+
+ *
+ * mode = AF_MODE_CONTINUOUS_VIDEO
+ *| state | trans. cause | new state | notes |
+ *+--------------------+---------------+--------------------+------------------+
+ *| INACTIVE | HAL initiates | PASSIVE_SCAN | Start AF scan |
+ *| | new scan | | Lens now moving |
+ *+--------------------+---------------+--------------------+------------------+
+ *| INACTIVE | AF_TRIGGER | NOT_FOCUSED_LOCKED | AF state query |
+ *| | | | Lens now locked |
+ *+--------------------+---------------+--------------------+------------------+
+ *| PASSIVE_SCAN | HAL completes | PASSIVE_FOCUSED | End AF scan |
+ *| | current scan | | Lens now locked |
+ *+--------------------+---------------+--------------------+------------------+
+ *| PASSIVE_SCAN | HAL fails | PASSIVE_UNFOCUSED | End AF scan |
+ *| | current scan | | Lens now locked |
+ *+--------------------+---------------+--------------------+------------------+
+ *| PASSIVE_SCAN | AF_TRIGGER | FOCUSED_LOCKED | Immediate trans. |
+ *| | | | if focus is good |
+ *| | | | Lens now locked |
+ *+--------------------+---------------+--------------------+------------------+
+ *| PASSIVE_SCAN | AF_TRIGGER | NOT_FOCUSED_LOCKED | Immediate trans.
| + *| | | | if focus is bad | + *| | | | Lens now locked | + *+--------------------+---------------+--------------------+------------------+ + *| PASSIVE_SCAN | AF_CANCEL | INACTIVE | Reset lens | + *| | | | position | + *| | | | Lens now locked | + *+--------------------+---------------+--------------------+------------------+ + *| PASSIVE_FOCUSED | HAL initiates | PASSIVE_SCAN | Start AF scan | + *| | new scan | | Lens now moving | + *+--------------------+---------------+--------------------+------------------+ + *| PASSIVE_UNFOCUSED | HAL initiates | PASSIVE_SCAN | Start AF scan | + *| | new scan | | Lens now moving | + *+--------------------+---------------+--------------------+------------------+ + *| PASSIVE_FOCUSED | AF_TRIGGER | FOCUSED_LOCKED | Immediate trans. | + *| | | | Lens now locked | + *+--------------------+---------------+--------------------+------------------+ + *| PASSIVE_UNFOCUSED | AF_TRIGGER | NOT_FOCUSED_LOCKED | Immediate trans. | + *| | | | Lens now locked | + *+--------------------+---------------+--------------------+------------------+ + *| FOCUSED_LOCKED | AF_TRIGGER | FOCUSED_LOCKED | No effect | + *+--------------------+---------------+--------------------+------------------+ + *| FOCUSED_LOCKED | AF_CANCEL | INACTIVE | Restart AF scan | + *+--------------------+---------------+--------------------+------------------+ + *| NOT_FOCUSED_LOCKED | AF_TRIGGER | NOT_FOCUSED_LOCKED | No effect | + *+--------------------+---------------+--------------------+------------------+ + *| NOT_FOCUSED_LOCKED | AF_CANCEL | INACTIVE | Restart AF scan | + *+--------------------+---------------+--------------------+------------------+ + * + * mode = AF_MODE_CONTINUOUS_PICTURE + *| state | trans. cause | new state | notes | + *+--------------------+---------------+--------------------+------------------+ + *| INACTIVE | HAL initiates | PASSIVE_SCAN | Start AF scan | + *| | new scan | | Lens now moving | + *+--------------------+---------------+--------------------+------------------+ + *| INACTIVE | AF_TRIGGER | NOT_FOCUSED_LOCKED | AF state query | + *| | | | Lens now locked | + *+--------------------+---------------+--------------------+------------------+ + *| PASSIVE_SCAN | HAL completes | PASSIVE_FOCUSED | End AF scan | + *| | current scan | | Lens now locked | + *+--------------------+---------------+--------------------+------------------+ + *| PASSIVE_SCAN | HAL fails | PASSIVE_UNFOCUSED | End AF scan | + *| | current scan | | Lens now locked | + *+--------------------+---------------+--------------------+------------------+ + *| PASSIVE_SCAN | AF_TRIGGER | FOCUSED_LOCKED | Eventual trans. | + *| | | | once focus good | + *| | | | Lens now locked | + *+--------------------+---------------+--------------------+------------------+ + *| PASSIVE_SCAN | AF_TRIGGER | NOT_FOCUSED_LOCKED | Eventual trans. 
| + *| | | | if cannot focus | + *| | | | Lens now locked | + *+--------------------+---------------+--------------------+------------------+ + *| PASSIVE_SCAN | AF_CANCEL | INACTIVE | Reset lens | + *| | | | position | + *| | | | Lens now locked | + *+--------------------+---------------+--------------------+------------------+ + *| PASSIVE_FOCUSED | HAL initiates | PASSIVE_SCAN | Start AF scan | + *| | new scan | | Lens now moving | + *+--------------------+---------------+--------------------+------------------+ + *| PASSIVE_UNFOCUSED | HAL initiates | PASSIVE_SCAN | Start AF scan | + *| | new scan | | Lens now moving | + *+--------------------+---------------+--------------------+------------------+ + *| PASSIVE_FOCUSED | AF_TRIGGER | FOCUSED_LOCKED | Immediate trans. | + *| | | | Lens now locked | + *+--------------------+---------------+--------------------+------------------+ + *| PASSIVE_UNFOCUSED | AF_TRIGGER | NOT_FOCUSED_LOCKED | Immediate trans. | + *| | | | Lens now locked | + *+--------------------+---------------+--------------------+------------------+ + *| FOCUSED_LOCKED | AF_TRIGGER | FOCUSED_LOCKED | No effect | + *+--------------------+---------------+--------------------+------------------+ + *| FOCUSED_LOCKED | AF_CANCEL | INACTIVE | Restart AF scan | + *+--------------------+---------------+--------------------+------------------+ + *| NOT_FOCUSED_LOCKED | AF_TRIGGER | NOT_FOCUSED_LOCKED | No effect | + *+--------------------+---------------+--------------------+------------------+ + *| NOT_FOCUSED_LOCKED | AF_CANCEL | INACTIVE | Restart AF scan | + *+--------------------+---------------+--------------------+------------------+ + * + * S4.6. AE and AWB state machines + * + * The AE and AWB state machines are mostly identical. AE has additional + * FLASH_REQUIRED and PRECAPTURE states. So rows below that refer to those two + * states should be ignored for the AWB state machine. + * + * when enabling AE/AWB or changing AE/AWB mode + *| state | trans. cause | new state | notes | + *+--------------------+---------------+--------------------+------------------+ + *| Any | mode change | INACTIVE | | + *+--------------------+---------------+--------------------+------------------+ + * + * mode = AE_MODE_OFF / AWB mode not AUTO + *| state | trans. cause | new state | notes | + *+--------------------+---------------+--------------------+------------------+ + *| INACTIVE | | INACTIVE | AE/AWB disabled | + *+--------------------+---------------+--------------------+------------------+ + * + * mode = AE_MODE_ON_* / AWB_MODE_AUTO + *| state | trans. 
cause | new state | notes | + *+--------------------+---------------+--------------------+------------------+ + *| INACTIVE | HAL initiates | SEARCHING | | + *| | AE/AWB scan | | | + *+--------------------+---------------+--------------------+------------------+ + *| INACTIVE | AE/AWB_LOCK | LOCKED | values locked | + *| | on | | | + *+--------------------+---------------+--------------------+------------------+ + *| SEARCHING | HAL finishes | CONVERGED | good values, not | + *| | AE/AWB scan | | changing | + *+--------------------+---------------+--------------------+------------------+ + *| SEARCHING | HAL finishes | FLASH_REQUIRED | converged but too| + *| | AE scan | | dark w/o flash | + *+--------------------+---------------+--------------------+------------------+ + *| SEARCHING | AE/AWB_LOCK | LOCKED | values locked | + *| | on | | | + *+--------------------+---------------+--------------------+------------------+ + *| CONVERGED | HAL initiates | SEARCHING | values locked | + *| | AE/AWB scan | | | + *+--------------------+---------------+--------------------+------------------+ + *| CONVERGED | AE/AWB_LOCK | LOCKED | values locked | + *| | on | | | + *+--------------------+---------------+--------------------+------------------+ + *| FLASH_REQUIRED | HAL initiates | SEARCHING | values locked | + *| | AE/AWB scan | | | + *+--------------------+---------------+--------------------+------------------+ + *| FLASH_REQUIRED | AE/AWB_LOCK | LOCKED | values locked | + *| | on | | | + *+--------------------+---------------+--------------------+------------------+ + *| LOCKED | AE/AWB_LOCK | SEARCHING | values not good | + *| | off | | after unlock | + *+--------------------+---------------+--------------------+------------------+ + *| LOCKED | AE/AWB_LOCK | CONVERGED | values good | + *| | off | | after unlock | + *+--------------------+---------------+--------------------+------------------+ + *| LOCKED | AE_LOCK | FLASH_REQUIRED | exposure good, | + *| | off | | but too dark | + *+--------------------+---------------+--------------------+------------------+ + *| All AE states | PRECAPTURE_ | PRECAPTURE | Start precapture | + *| | START | | sequence | + *+--------------------+---------------+--------------------+------------------+ + *| PRECAPTURE | Sequence done.| CONVERGED | Ready for high- | + *| | AE_LOCK off | | quality capture | + *+--------------------+---------------+--------------------+------------------+ + *| PRECAPTURE | Sequence done.| LOCKED | Ready for high- | + *| | AE_LOCK on | | quality capture | + *+--------------------+---------------+--------------------+------------------+ + * + */ + +/** + * S5. Cropping: + * + * Cropping of the full pixel array (for digital zoom and other use cases where + * a smaller FOV is desirable) is communicated through the + * ANDROID_SCALER_CROP_REGION setting. This is a per-request setting, and can + * change on a per-request basis, which is critical for implementing smooth + * digital zoom. + * + * The region is defined as a rectangle (x, y, width, height), with (x, y) + * describing the top-left corner of the rectangle. The rectangle is defined on + * the coordinate system of the sensor active pixel array, with (0,0) being the + * top-left pixel of the active pixel array. Therefore, the width and height + * cannot be larger than the dimensions reported in the + * ANDROID_SENSOR_ACTIVE_PIXEL_ARRAY static info field. 
The minimum allowed
+ * width and height are reported by the HAL through the
+ * ANDROID_SCALER_MAX_DIGITAL_ZOOM static info field, which describes the
+ * maximum supported zoom factor. Therefore, the minimum crop region width and
+ * height are:
+ *
+ * {width, height} =
+ * { floor(ANDROID_SENSOR_ACTIVE_PIXEL_ARRAY[0] /
+ * ANDROID_SCALER_MAX_DIGITAL_ZOOM),
+ * floor(ANDROID_SENSOR_ACTIVE_PIXEL_ARRAY[1] /
+ * ANDROID_SCALER_MAX_DIGITAL_ZOOM) }
+ *
+ * If the crop region needs to fulfill specific requirements (for example, it
+ * needs to start on even coordinates, and its width/height needs to be even),
+ * the HAL must do the necessary rounding and write out the final crop region
+ * used in the output result metadata. Similarly, if the HAL implements video
+ * stabilization, it must adjust the result crop region to describe the region
+ * actually included in the output after video stabilization is applied. In
+ * general, a camera-using application must be able to determine the field of
+ * view it is receiving based on the crop region, the dimensions of the image
+ * sensor, and the lens focal length.
+ *
+ * It is assumed that the cropping is applied after the raw to other color
+ * space conversion. Raw streams (RAW16 and RAW_OPAQUE) don't have this
+ * conversion stage, and are not croppable. Therefore, the crop region must be
+ * ignored by the HAL for raw streams.
+ *
+ * Since the crop region applies to all non-raw streams, which may have
+ * different aspect ratios than the crop region, the exact sensor region used
+ * for each stream may be smaller than the crop region. Specifically, each
+ * stream should maintain square pixels and its aspect ratio by minimally
+ * further cropping the defined crop region. If the stream's aspect ratio is
+ * wider than the crop region, the stream should be further cropped vertically,
+ * and if the stream's aspect ratio is narrower than the crop region, the
+ * stream should be further cropped horizontally.
+ *
+ * In all cases, the stream crop must be centered within the full crop region,
+ * and each stream is only either cropped horizontally or vertically relative
+ * to the full crop region, never both.
+ *
+ * For example, if two streams are defined, a 640x480 stream (4:3 aspect), and
+ * a 1280x720 stream (16:9 aspect), the examples below demonstrate the expected
+ * output regions for each stream for a few sample crop regions, on a
+ * hypothetical 3 MP (2000 x 1500 pixel array) sensor.
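+ *
+ * As a non-normative illustration only (the helper name and the use of 64-bit
+ * intermediates are this sketch's assumptions, not part of the HAL API), the
+ * centered, minimal further crop described above could be computed like so:
+ *
+ *   static void stream_crop(uint32_t cx, uint32_t cy, uint32_t cw, uint32_t ch,
+ *                           uint32_t sw, uint32_t sh,
+ *                           uint32_t *ox, uint32_t *oy,
+ *                           uint32_t *ow, uint32_t *oh)
+ *   {
+ *       if ((uint64_t)sw * ch > (uint64_t)sh * cw) {
+ *           // Stream is wider than the crop region: crop vertically.
+ *           *ow = cw;
+ *           *oh = (uint32_t)((uint64_t)cw * sh / sw);
+ *           *ox = cx;
+ *           *oy = cy + (ch - *oh) / 2;
+ *       } else {
+ *           // Stream is narrower (or equal): crop horizontally.
+ *           *oh = ch;
+ *           *ow = (uint32_t)((uint64_t)ch * sw / sh);
+ *           *oy = cy;
+ *           *ox = cx + (cw - *ow) / 2;
+ *       }
+ *   }
+ *
+ * For the 1280x720 stream and crop region (500, 375, 1000, 750) below, this
+ * yields (500, 469, 1000, 562), matching the first example.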
+
+ * Crop region: (500, 375, 1000, 750) (4:3 aspect ratio)
+ *
+ * 640x480 stream crop: (500, 375, 1000, 750) (equal to crop region)
+ * 1280x720 stream crop: (500, 469, 1000, 562) (marked with =)
+ *
+ * 0 1000 2000
+ * +---------+---------+---------+----------+
+ * | Active pixel array |
+ * | |
+ * | |
+ * + +-------------------+ + 375
+ * | | | |
+ * | O===================O |
+ * | I 1280x720 stream I |
+ * + I I + 750
+ * | I I |
+ * | O===================O |
+ * | | | |
+ * + +-------------------+ + 1125
+ * | Crop region, 640x480 stream |
+ * | |
+ * | |
+ * +---------+---------+---------+----------+ 1500
+ *
+ * Crop region: (500, 375, 1333, 750) (16:9 aspect ratio)
+ *
+ * 640x480 stream crop: (666, 375, 1000, 750) (marked with =)
+ * 1280x720 stream crop: (500, 375, 1333, 750) (equal to crop region)
+ *
+ * 0 1000 2000
+ * +---------+---------+---------+----------+
+ * | Active pixel array |
+ * | |
+ * | |
+ * + +---O==================O---+ + 375
+ * | | I 640x480 stream I | |
+ * | | I I | |
+ * | | I I | |
+ * + | I I | + 750
+ * | | I I | |
+ * | | I I | |
+ * | | I I | |
+ * + +---O==================O---+ + 1125
+ * | Crop region, 1280x720 stream |
+ * | |
+ * | |
+ * +---------+---------+---------+----------+ 1500
+ *
+ * Crop region: (500, 375, 750, 750) (1:1 aspect ratio)
+ *
+ * 640x480 stream crop: (500, 469, 750, 562) (marked with =)
+ * 1280x720 stream crop: (500, 543, 750, 414) (marked with #)
+ *
+ * 0 1000 2000
+ * +---------+---------+---------+----------+
+ * | Active pixel array |
+ * | |
+ * | |
+ * + +--------------+ + 375
+ * | O==============O |
+ * | ################ |
+ * | # # |
+ * + # # + 750
+ * | # # |
+ * | ################ 1280x720 |
+ * | O==============O 640x480 |
+ * + +--------------+ + 1125
+ * | Crop region |
+ * | |
+ * | |
+ * +---------+---------+---------+----------+ 1500
+ *
+ * And a final example, a 1024x1024 square aspect ratio stream instead of the
+ * 480p stream:
+ *
+ * Crop region: (500, 375, 1000, 750) (4:3 aspect ratio)
+ *
+ * 1024x1024 stream crop: (625, 375, 750, 750) (marked with #)
+ * 1280x720 stream crop: (500, 469, 1000, 562) (marked with =)
+ *
+ * 0 1000 2000
+ * +---------+---------+---------+----------+
+ * | Active pixel array |
+ * | |
+ * | 1024x1024 stream |
+ * + +--###############--+ + 375
+ * | | # # | |
+ * | O===================O |
+ * | I 1280x720 stream I |
+ * + I I + 750
+ * | I I |
+ * | O===================O |
+ * | | # # | |
+ * + +--###############--+ + 1125
+ * | Crop region |
+ * | |
+ * | |
+ * +---------+---------+---------+----------+ 1500
+ *
+ */
+
+/**
+ * S6. Error management:
+ *
+ * Camera HAL device ops functions that have a return value will all return
+ * -ENODEV / NULL in case of a serious error. This means the device cannot
+ * continue operation, and must be closed by the framework. Once this error is
+ * returned by some method, or if notify() is called with ERROR_DEVICE, only
+ * the close() method can be called successfully. All other methods will return
+ * -ENODEV / NULL.
+ *
+ * If a device op is called in the wrong sequence, for example if the framework
+ * calls configure_streams() before initialize(), the device must
+ * return -ENOSYS from the call, and do nothing.
+ *
+ * Transient errors in image capture must be reported through notify() as follows:
+ *
+ * - The failure of an entire capture to occur must be reported by the HAL by
+ * calling notify() with ERROR_REQUEST.
Individual errors for the result + * metadata or the output buffers must not be reported in this case. + * + * - If the metadata for a capture cannot be produced, but some image buffers + * were filled, the HAL must call notify() with ERROR_RESULT. + * + * - If an output image buffer could not be filled, but either the metadata was + * produced or some other buffers were filled, the HAL must call notify() with + * ERROR_BUFFER for each failed buffer. + * + * In each of these transient failure cases, the HAL must still call + * process_capture_result, with valid output and input (if an input buffer was + * submitted) buffer_handle_t. If the result metadata could not be produced, it + * should be NULL. If some buffers could not be filled, they must be returned with + * process_capture_result in the error state, their release fences must be set to + * the acquire fences passed by the framework, or -1 if they have been waited on by + * the HAL already. + * + * Invalid input arguments result in -EINVAL from the appropriate methods. In + * that case, the framework must act as if that call had never been made. + * + */ + +/** + * S7. Key Performance Indicator (KPI) glossary: + * + * This includes some critical definitions that are used by KPI metrics. + * + * Pipeline Latency: + * For a given capture request, the duration from the framework calling + * process_capture_request to the HAL sending capture result and all buffers + * back by process_capture_result call. To make the Pipeline Latency measure + * independent of frame rate, it is measured by frame count. + * + * For example, when frame rate is 30 (fps), the frame duration (time interval + * between adjacent frame capture time) is 33 (ms). + * If it takes 5 frames for framework to get the result and buffers back for + * a given request, then the Pipeline Latency is 5 (frames), instead of + * 5 x 33 = 165 (ms). + * + * The Pipeline Latency is determined by android.request.pipelineDepth and + * android.request.pipelineMaxDepth, see their definitions for more details. + * + */ + +/** + * S8. Sample Use Cases: + * + * This includes some typical use case examples the camera HAL may support. + * + * S8.1 Zero Shutter Lag (ZSL) with CAMERA3_STREAM_BIDIRECTIONAL stream. + * + * For this use case, the bidirectional stream will be used by the framework as follows: + * + * 1. The framework includes a buffer from this stream as output buffer in a + * request as normal. + * + * 2. Once the HAL device returns a filled output buffer to the framework, + * the framework may do one of two things with the filled buffer: + * + * 2. a. The framework uses the filled data, and returns the now-used buffer + * to the stream queue for reuse. This behavior exactly matches the + * OUTPUT type of stream. + * + * 2. b. The framework wants to reprocess the filled data, and uses the + * buffer as an input buffer for a request. Once the HAL device has + * used the reprocessing buffer, it then returns it to the + * framework. The framework then returns the now-used buffer to the + * stream queue for reuse. + * + * 3. The HAL device will be given the buffer again as an output buffer for + * a request at some future point. + * + * For ZSL use case, the pixel format for bidirectional stream will be + * HAL_PIXEL_FORMAT_RAW_OPAQUE or HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED if it + * is listed in android.scaler.availableInputOutputFormatsMap. 
When
+ * HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED is used, the gralloc
+ * usage flags for the consumer endpoint will be set to GRALLOC_USAGE_HW_CAMERA_ZSL.
+ * A configuration stream list that has a BIDIRECTIONAL stream used as input will
+ * usually also have a distinct OUTPUT stream to get the reprocessing data. For example,
+ * for the ZSL use case, the stream list might be configured with the following:
+ *
+ * - A HAL_PIXEL_FORMAT_RAW_OPAQUE bidirectional stream is used
+ * as input.
+ * - And a HAL_PIXEL_FORMAT_BLOB (JPEG) output stream.
+ *
+ * S8.2 ZSL (OPAQUE) reprocessing with CAMERA3_STREAM_INPUT stream.
+ *
+ * CAMERA_DEVICE_API_VERSION_3_3:
+ * When the OPAQUE_REPROCESSING capability is supported by the camera device, the INPUT stream
+ * can be used for application/framework implemented use cases like Zero Shutter Lag (ZSL).
+ * This kind of stream will be used by the framework as follows:
+ *
+ * 1. Application/framework configures an opaque (RAW or YUV based) format output stream that is
+ * used to produce the ZSL output buffers. The stream pixel format will be
+ * HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED.
+ *
+ * 2. Application/framework configures an opaque format input stream that is used to
+ * send the reprocessing ZSL buffers to the HAL. The stream pixel format will
+ * also be HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED.
+ *
+ * 3. Application/framework configures a YUV/JPEG output stream that is used to receive the
+ * reprocessed data. The stream pixel format will be YCbCr_420/HAL_PIXEL_FORMAT_BLOB.
+ *
+ * 4. Application/framework picks a ZSL buffer from the ZSL output stream when a ZSL capture is
+ * issued by the application, and sends the data back as an input buffer in a
+ * reprocessing request, then sends it to the HAL for reprocessing.
+ *
+ * 5. The HAL sends back the output YUV/JPEG result to the framework.
+ *
+ * The HAL can select the actual opaque buffer format and configure the ISP pipeline
+ * appropriately based on the HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED format and
+ * the gralloc usage flag GRALLOC_USAGE_HW_CAMERA_ZSL.
+ *
+ * S8.3 YUV reprocessing with CAMERA3_STREAM_INPUT stream.
+ *
+ * When YUV reprocessing is supported by the HAL, the INPUT stream
+ * can be used for the YUV reprocessing use cases like lucky-shot and image fusion.
+ * This kind of stream will be used by the framework as follows:
+ *
+ * 1. Application/framework configures a YCbCr_420 format output stream that is
+ * used to produce the output buffers.
+ *
+ * 2. Application/framework configures a YCbCr_420 format input stream that is used to
+ * send the reprocessing YUV buffers to the HAL.
+ *
+ * 3. Application/framework configures a YUV/JPEG output stream that is used to receive the
+ * reprocessed data. The stream pixel format will be YCbCr_420/HAL_PIXEL_FORMAT_BLOB.
+ *
+ * 4. Application/framework processes the output buffers (could be as simple as picking
+ * an output buffer directly) from the output stream when a capture is issued, and sends
+ * the data back as an input buffer in a reprocessing request, then sends it to the HAL
+ * for reprocessing.
+ *
+ * 5. The HAL sends back the output YUV/JPEG result to the framework.
+ *
+ * A non-normative sketch of building such a reprocess request follows this section.
+ *
+ */
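+
+/*
+ * Illustration only, not part of the API contract: a reprocess request for the
+ * flows above is an ordinary camera3_capture_request_t (defined later in this
+ * header) whose input_buffer is set. The stream/buffer variables used here are
+ * hypothetical.
+ *
+ *   camera3_stream_buffer_t in = {
+ *       .stream = zsl_input_stream,          // INPUT or BIDIRECTIONAL stream
+ *       .buffer = &selected_handle,          // previously filled output buffer
+ *       .status = CAMERA3_BUFFER_STATUS_OK,
+ *       .acquire_fence = -1,                 // no waiting needed in this sketch
+ *       .release_fence = -1,
+ *   };
+ *   camera3_capture_request_t req = {
+ *       .frame_number = next_frame,
+ *       .settings = selected_result_metadata, // result of the selected capture
+ *       .input_buffer = &in,
+ *       .num_output_buffers = 1,
+ *       .output_buffers = &jpeg_out,          // HAL_PIXEL_FORMAT_BLOB stream buffer
+ *   };
+ */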
+
+/**
+ * S9. Notes on Controls and Metadata
+ *
+ * This section contains notes about the interpretation and usage of various metadata tags.
+ *
+ * S9.1 HIGH_QUALITY and FAST modes.
+ *
+ * Many camera post-processing blocks may be listed as having HIGH_QUALITY,
+ * FAST, and OFF operating modes. These blocks will typically also have an
+ * 'available modes' tag representing which of these operating modes are
+ * available on a given device. The general policy regarding implementing
+ * these modes is as follows:
+ *
+ * 1. Operating mode controls of hardware blocks that cannot be disabled
+ * must not list OFF in their corresponding 'available modes' tags.
+ *
+ * 2. OFF will always be included in their corresponding 'available modes'
+ * tag if it is possible to disable that hardware block.
+ *
+ * 3. FAST must always be included in the 'available modes' tags for all
+ * post-processing blocks supported on the device. If a post-processing
+ * block also has a slower and higher quality operating mode that does
+ * not meet the framerate requirements for FAST mode, HIGH_QUALITY should
+ * be included in the 'available modes' tag to represent this operating
+ * mode.
+ */
+
+/**
+ * S10. Reprocessing flow and controls
+ *
+ * This section describes the OPAQUE and YUV reprocessing flow and controls. OPAQUE reprocessing
+ * uses an opaque format that is not directly application-visible, and the application can
+ * only select some of the output buffers and send them back to the HAL for reprocessing, while YUV
+ * reprocessing gives the application the opportunity to process the buffers before reprocessing.
+ *
+ * S8 gives the stream configurations for the typical reprocessing use cases;
+ * this section specifies the buffer flow and controls in more detail.
+ *
+ * S10.1 OPAQUE (typically for ZSL use case) reprocessing flow and controls
+ *
+ * For the OPAQUE reprocessing (e.g. ZSL) use case, after the application creates the specific
+ * output and input streams, runtime buffer flow and controls are specified as follows:
+ *
+ * 1. Application starts output streaming by sending repeating requests for output
+ * opaque buffers and preview. The buffers are held by an application
+ * maintained circular buffer. The requests are based on the CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG
+ * capture template, which should have all necessary settings that guarantee the output
+ * frame rate is not slowed down relative to the sensor output frame rate.
+ *
+ * 2. When a capture is issued, the application selects one output buffer based
+ * on application buffer selection logic, e.g. good AE and AF statistics, etc.
+ * Application then creates a reprocess request based on the capture result associated
+ * with this selected buffer. The selected output buffer is now added to this reprocess
+ * request as an input buffer; the output buffer of this reprocess request should be
+ * either a JPEG output buffer or a YUV output buffer, or both, depending on the application's
+ * choice.
+ *
+ * 3. Application then alters the reprocess settings to get the best image quality. The HAL must
+ * support, and must only support, the controls below if the HAL supports the
+ * OPAQUE_REPROCESSING capability:
+ * - android.jpeg.* (if a JPEG buffer is included as one of the outputs)
+ * - android.noiseReduction.mode (change to HIGH_QUALITY if it is supported)
+ * - android.edge.mode (change to HIGH_QUALITY if it is supported)
+ * All other controls must be ignored by the HAL (see the non-normative sketch
+ * following this list).
+ * 4. The HAL processes the input buffer and returns the output buffers in the capture results
+ * as normal.
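+ *
+ * For illustration only (the helper names here are hypothetical; the
+ * ANDROID_* tag enums are from camera_metadata_tags.h), a HAL's reprocess
+ * path might honor exactly the permitted keys like this:
+ *
+ *   // Apply only the controls permitted for OPAQUE reprocessing.
+ *   apply_jpeg_controls(settings);                     // android.jpeg.*
+ *   apply_mode_control(settings, ANDROID_NOISE_REDUCTION_MODE);
+ *   apply_mode_control(settings, ANDROID_EDGE_MODE);
+ *   // Every other key present in 'settings' is deliberately ignored.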
+ *
+ * S10.2 YUV reprocessing flow and controls
+ *
+ * The YUV reprocessing buffer flow is similar to OPAQUE reprocessing, with the following
+ * differences:
+ *
+ * 1. Application may want to have finer granularity control of the intermediate YUV images
+ * (before reprocessing). For example, application may choose
+ * - android.noiseReduction.mode == MINIMAL
+ * to make sure that no YUV domain noise reduction has been applied to the output YUV buffers,
+ * then it can do its own advanced noise reduction on them. For the OPAQUE reprocessing case, this
+ * doesn't matter, as long as the final reprocessed image has the best quality.
+ * 2. Application may modify the YUV output buffer data. For example, for the image fusion use
+ * case, multiple output images are merged together to improve the signal-to-noise
+ * ratio (SNR), and the input buffer may be generated from multiple buffers by the application.
+ * To avoid an excessive amount of noise reduction and an insufficient amount of edge enhancement
+ * being applied to the input buffer, the application can hint to the HAL how much effective
+ * exposure time improvement has been done by the application; the HAL can then adjust the
+ * noise reduction and edge enhancement parameters to get the best reprocessed image quality.
+ * The tag below can be used for this purpose:
+ * - android.reprocess.effectiveExposureFactor
+ * The value would be the exposure time increase factor applied to the original output image;
+ * for example, if there are N images merged, the exposure time increase factor would be up
+ * to sqrt(N). See this tag's spec for more details.
+ *
+ * S10.3 Reprocessing pipeline characteristics
+ *
+ * The reprocessing pipeline has the following characteristics that differ from the normal
+ * output pipeline:
+ *
+ * 1. The reprocessing result can be returned ahead of the pending normal output results. But
+ * the FIFO ordering must be maintained for all reprocessing results. For example, suppose
+ * the following requests (A stands for output requests, B stands for reprocessing requests)
+ * are being processed by the HAL:
+ * A1, A2, A3, A4, B1, A5, B2, A6...
+ * The result of B1 can be returned before A1-A4, but the result of B2 must be returned after B1.
+ * 2. Single input rule: For a given reprocessing request, all output buffers must be from the
+ * input buffer, rather than sensor output. For example, if a reprocess request includes both
+ * JPEG and preview buffers, all output buffers must be produced from the input buffer
+ * included in the reprocessing request, rather than from the sensor. The HAL must not output
+ * preview buffers from the sensor while outputting the JPEG buffer from the input buffer.
+ * 3. The input buffer will be from the camera output directly (ZSL case) or indirectly (image
+ * fusion case). For the case where the buffer is modified, the size will remain the same.
+ * The HAL can notify CAMERA3_MSG_ERROR_REQUEST if a buffer from an unknown source is sent.
+ * 4. Result as reprocessing request: The HAL can expect that a reprocessing request is a copy
+ * of one of the output results with minor allowed setting changes. The HAL can notify
+ * CAMERA3_MSG_ERROR_REQUEST if a request from an unknown source is issued.
+ * 5. Output buffers may not be used as inputs across the configure stream boundary. This is
+ * because an opaque stream like the ZSL output stream may have a different actual image size
+ * inside the ZSL buffer, to save power and bandwidth for smaller-resolution JPEG capture.
+ * The HAL may notify CAMERA3_MSG_ERROR_REQUEST if this case occurs.
+ * 6. Error reporting for reprocess requests during a flush should follow the same rule
+ * specified by the flush() method.
+ *
+ */
+
+__BEGIN_DECLS
+
+struct camera3_device;
+
+/**********************************************************************
+ *
+ * Camera3 stream and stream buffer definitions.
+ *
+ * These structs and enums define the handles and contents of the input and
+ * output streams connecting the HAL to various framework and application buffer
+ * consumers. Each stream is backed by a gralloc buffer queue.
+ *
+ */
+
+/**
+ * camera3_stream_type_t:
+ *
+ * The type of the camera stream, which defines whether the camera HAL device is
+ * the producer or the consumer for that stream, and how the buffers of the
+ * stream relate to the other streams.
+ */
+typedef enum camera3_stream_type {
+ /**
+ * This stream is an output stream; the camera HAL device will be
+ * responsible for filling buffers from this stream with newly captured or
+ * reprocessed image data.
+ */
+ CAMERA3_STREAM_OUTPUT = 0,
+
+ /**
+ * This stream is an input stream; the camera HAL device will be responsible
+ * for reading buffers from this stream and sending them through the camera
+ * processing pipeline, as if the buffer was a newly captured image from the
+ * imager.
+ *
+ * The pixel format for an input stream can be any format reported by
+ * android.scaler.availableInputOutputFormatsMap. The pixel format of the
+ * output stream that is used to produce the reprocessing data may be any
+ * format reported by android.scaler.availableStreamConfigurations. The
+ * supported input/output stream combinations depend on the camera device
+ * capabilities; see android.scaler.availableInputOutputFormatsMap for
+ * stream map details.
+ *
+ * This kind of stream is generally used to reprocess data into higher
+ * quality images (that otherwise would cause a frame rate performance
+ * loss), or to do off-line reprocessing.
+ *
+ * CAMERA_DEVICE_API_VERSION_3_3:
+ * The typical use cases are OPAQUE (typically ZSL) and YUV reprocessing,
+ * see S8.2, S8.3 and S10 for more details.
+ */
+ CAMERA3_STREAM_INPUT = 1,
+
+ /**
+ * This stream can be used for input and output. Typically, the stream is
+ * used as an output stream, but occasionally one already-filled buffer may
+ * be sent back to the HAL device for reprocessing.
+ *
+ * This kind of stream is meant generally for Zero Shutter Lag (ZSL)
+ * features, where copying the captured image from the output buffer to the
+ * reprocessing input buffer would be expensive. See S8.1 for more details.
+ *
+ * Note that the HAL will always be reprocessing data it produced.
+ *
+ */
+ CAMERA3_STREAM_BIDIRECTIONAL = 2,
+
+ /**
+ * Total number of framework-defined stream types
+ */
+ CAMERA3_NUM_STREAM_TYPES
+
+} camera3_stream_type_t;
+
+/**
+ * camera3_stream_rotation_t:
+ *
+ * The required counterclockwise rotation of the camera stream.
+ */
+typedef enum camera3_stream_rotation {
+ /* No rotation */
+ CAMERA3_STREAM_ROTATION_0 = 0,
+
+ /* Rotate by 90 degrees counterclockwise */
+ CAMERA3_STREAM_ROTATION_90 = 1,
+
+ /* Rotate by 180 degrees counterclockwise */
+ CAMERA3_STREAM_ROTATION_180 = 2,
+
+ /* Rotate by 270 degrees counterclockwise */
+ CAMERA3_STREAM_ROTATION_270 = 3
+} camera3_stream_rotation_t;
+
+/**
+ * camera3_stream_configuration_mode_t:
+ *
+ * This defines the general operation mode for the HAL (for a given stream configuration), where
+ * modes besides NORMAL have different semantics, and usually limit the generality of the API in
+ * exchange for higher performance in some particular area.
+ */
+typedef enum camera3_stream_configuration_mode {
+ /**
+ * Normal stream configuration operation mode. This is the default camera operation mode,
+ * where all semantics of HAL APIs and metadata controls apply.
+ */ + CAMERA3_STREAM_CONFIGURATION_NORMAL_MODE = 0, + + /** + * Special constrained high speed operation mode for devices that can not support high + * speed output in NORMAL mode. All streams in this configuration are operating at high speed + * mode and have different characteristics and limitations to achieve high speed output. + * The NORMAL mode can still be used for high speed output if the HAL can support high speed + * output while satisfying all the semantics of HAL APIs and metadata controls. It is + * recommended for the HAL to support high speed output in NORMAL mode (by advertising the high + * speed FPS ranges in android.control.aeAvailableTargetFpsRanges) if possible. + * + * This mode has below limitations/requirements: + * + * 1. The HAL must support up to 2 streams with sizes reported by + * android.control.availableHighSpeedVideoConfigurations. + * 2. In this mode, the HAL is expected to output up to 120fps or higher. This mode must + * support the targeted FPS range and size configurations reported by + * android.control.availableHighSpeedVideoConfigurations. + * 3. The HAL must support HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED output stream format. + * 4. To achieve efficient high speed streaming, the HAL may have to aggregate + * multiple frames together and send to camera device for processing where the request + * controls are same for all the frames in this batch (batch mode). The HAL must support + * max batch size and the max batch size requirements defined by + * android.control.availableHighSpeedVideoConfigurations. + * 5. In this mode, the HAL must override aeMode, awbMode, and afMode to ON, ON, and + * CONTINUOUS_VIDEO, respectively. All post-processing block mode controls must be + * overridden to be FAST. Therefore, no manual control of capture and post-processing + * parameters is possible. All other controls operate the same as when + * android.control.mode == AUTO. This means that all other android.control.* fields + * must continue to work, such as + * + * android.control.aeTargetFpsRange + * android.control.aeExposureCompensation + * android.control.aeLock + * android.control.awbLock + * android.control.effectMode + * android.control.aeRegions + * android.control.afRegions + * android.control.awbRegions + * android.control.afTrigger + * android.control.aePrecaptureTrigger + * + * Outside of android.control.*, the following controls must work: + * + * android.flash.mode (TORCH mode only, automatic flash for still capture will not work + * since aeMode is ON) + * android.lens.opticalStabilizationMode (if it is supported) + * android.scaler.cropRegion + * android.statistics.faceDetectMode (if it is supported) + * 6. To reduce the amount of data passed across process boundaries at + * high frame rate, within one batch, camera framework only propagates + * the last shutter notify and the last capture results (including partial + * results and final result) to the app. The shutter notifies and capture + * results for the other requests in the batch are derived by + * the camera framework. As a result, the HAL can return empty metadata + * except for the last result in the batch. + * + * For more details about high speed stream requirements, see + * android.control.availableHighSpeedVideoConfigurations and CONSTRAINED_HIGH_SPEED_VIDEO + * capability defined in android.request.availableCapabilities. + * + * This mode only needs to be supported by HALs that include CONSTRAINED_HIGH_SPEED_VIDEO in + * the android.request.availableCapabilities static metadata. 
+ */ + CAMERA3_STREAM_CONFIGURATION_CONSTRAINED_HIGH_SPEED_MODE = 1, + + /** + * First value for vendor-defined stream configuration modes. + */ + CAMERA3_VENDOR_STREAM_CONFIGURATION_MODE_START = 0x8000 +} camera3_stream_configuration_mode_t; + +/** + * camera3_stream_t: + * + * A handle to a single camera input or output stream. A stream is defined by + * the framework by its buffer resolution and format, and additionally by the + * HAL with the gralloc usage flags and the maximum in-flight buffer count. + * + * The stream structures are owned by the framework, but pointers to a + * camera3_stream passed into the HAL by configure_streams() are valid until the + * end of the first subsequent configure_streams() call that _does not_ include + * that camera3_stream as an argument, or until the end of the close() call. + * + * All camera3_stream framework-controlled members are immutable once the + * camera3_stream is passed into configure_streams(). The HAL may only change + * the HAL-controlled parameters during a configure_streams() call, except for + * the contents of the private pointer. + * + * If a configure_streams() call returns a non-fatal error, all active streams + * remain valid as if configure_streams() had not been called. + * + * The endpoint of the stream is not visible to the camera HAL device. + * In DEVICE_API_VERSION_3_1, this was changed to share consumer usage flags + * on streams where the camera is a producer (OUTPUT and BIDIRECTIONAL stream + * types) see the usage field below. + */ +typedef struct camera3_stream { + + /***** + * Set by framework before configure_streams() + */ + + /** + * The type of the stream, one of the camera3_stream_type_t values. + */ + int stream_type; + + /** + * The width in pixels of the buffers in this stream + */ + uint32_t width; + + /** + * The height in pixels of the buffers in this stream + */ + uint32_t height; + + /** + * The pixel format for the buffers in this stream. Format is a value from + * the HAL_PIXEL_FORMAT_* list in system/core/include/system/graphics.h, or + * from device-specific headers. + * + * If HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED is used, then the platform + * gralloc module will select a format based on the usage flags provided by + * the camera device and the other endpoint of the stream. + * + * <= CAMERA_DEVICE_API_VERSION_3_1: + * + * The camera HAL device must inspect the buffers handed to it in the + * subsequent register_stream_buffers() call to obtain the + * implementation-specific format details, if necessary. + * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * register_stream_buffers() won't be called by the framework, so the HAL + * should configure the ISP and sensor pipeline based purely on the sizes, + * usage flags, and formats for the configured streams. + */ + int format; + + /***** + * Set by HAL during configure_streams(). + */ + + /** + * The gralloc usage flags for this stream, as needed by the HAL. The usage + * flags are defined in gralloc.h (GRALLOC_USAGE_*), or in device-specific + * headers. + * + * For output streams, these are the HAL's producer usage flags. For input + * streams, these are the HAL's consumer usage flags. The usage flags from + * the producer and the consumer will be combined together and then passed + * to the platform gralloc HAL module for allocating the gralloc buffers for + * each stream. + * + * Version information: + * + * == CAMERA_DEVICE_API_VERSION_3_0: + * + * No initial value guaranteed when passed via configure_streams(). 
+ * HAL may not use this field as input, and must write over this field
+ * with its usage flags.
+ *
+ * >= CAMERA_DEVICE_API_VERSION_3_1:
+ *
+ * For stream_type OUTPUT and BIDIRECTIONAL, when passed via
+ * configure_streams(), the initial value of this is the consumer's
+ * usage flags. The HAL may use these consumer flags to decide stream
+ * configuration.
+ * For stream_type INPUT, when passed via configure_streams(), the initial
+ * value of this is 0.
+ * For all streams passed via configure_streams(), the HAL must write
+ * over this field with its usage flags.
+ *
+ * From Android O, the usage flag for an output stream may be a bitwise
+ * combination of usage flags for multiple consumers, for the purpose of
+ * sharing one camera stream between those consumers. The HAL must fail
+ * the configure_streams call with -EINVAL if the combined flags cannot be
+ * supported due to incompatible buffer format, dataSpace, or other hardware
+ * limitations.
+ */
+ uint32_t usage;
+
+ /**
+ * The maximum number of buffers the HAL device may need to have dequeued at
+ * the same time. The HAL device may not have more buffers in-flight from
+ * this stream than this value.
+ */
+ uint32_t max_buffers;
+
+ /**
+ * A handle to HAL-private information for the stream. Will not be inspected
+ * by the framework code.
+ */
+ void *priv;
+
+ /**
+ * A field that describes the contents of the buffer. The format and buffer
+ * dimensions define the memory layout and structure of the stream buffers,
+ * while dataSpace defines the meaning of the data within the buffer.
+ *
+ * For most formats, dataSpace defines the color space of the image data.
+ * In addition, for some formats, dataSpace indicates whether image- or
+ * depth-based data is requested. See system/core/include/system/graphics.h
+ * for details of formats and valid dataSpace values for each format.
+ *
+ * Version information:
+ *
+ * < CAMERA_DEVICE_API_VERSION_3_3:
+ *
+ * Not defined and should not be accessed. dataSpace should be assumed to
+ * be HAL_DATASPACE_UNKNOWN, and the appropriate color space, etc, should
+ * be determined from the usage flags and the format.
+ *
+ * = CAMERA_DEVICE_API_VERSION_3_3:
+ *
+ * Always set by the camera service. HAL must use this dataSpace to
+ * configure the stream to the correct colorspace, or to select between
+ * color and depth outputs if supported. The dataspace values are the
+ * legacy definitions in graphics.h
+ *
+ * >= CAMERA_DEVICE_API_VERSION_3_4:
+ *
+ * Always set by the camera service. HAL must use this dataSpace to
+ * configure the stream to the correct colorspace, or to select between
+ * color and depth outputs if supported. The dataspace values are set
+ * using the V0 dataspace definitions in graphics.h
+ */
+ android_dataspace_t data_space;
+
+ /**
+ * The required output rotation of the stream, one of
+ * the camera3_stream_rotation_t values. This must be inspected by the HAL
+ * along with stream width and height. For example, if the rotation is 90
+ * degrees and the stream width and height are 720 and 1280 respectively, the
+ * camera service will supply buffers of size 720x1280, and the HAL should
+ * capture a 1280x720 image and rotate the image by 90 degrees
+ * counterclockwise. The rotation field is a no-op when the stream type is
+ * input. Camera HAL must ignore the rotation field for an input stream.
+ *
+ * <= CAMERA_DEVICE_API_VERSION_3_2:
+ *
+ * Not defined and must not be accessed. HAL must not apply any rotation
+ * on output images.
+ *
+ * >= CAMERA_DEVICE_API_VERSION_3_3:
+ *
+ * Always set by camera service. HAL must inspect this field during stream
+ * configuration and return -EINVAL if the HAL cannot perform such rotation.
+ * HAL must always support CAMERA3_STREAM_ROTATION_0, so a
+ * configure_streams() call must not fail for unsupported rotation if the
+ * rotation field of all streams is CAMERA3_STREAM_ROTATION_0.
+ *
+ */
+ int rotation;
+
+ /**
+ * The physical camera id this stream belongs to.
+ *
+ * <= CAMERA_DEVICE_API_VERSION_3_4:
+ *
+ * Not defined and must not be accessed.
+ *
+ * >= CAMERA_DEVICE_API_VERSION_3_5:
+ *
+ * Always set by camera service. If the camera device is not a logical
+ * multi camera, or if the camera is a logical multi camera but the stream
+ * is not a physical output stream, this field will point to a 0-length
+ * string.
+ *
+ * A logical multi camera is a camera device backed by multiple physical
+ * cameras that are also exposed to the application. For a logical
+ * multi camera, a physical output stream is an output stream specifically
+ * requested on an underlying physical camera.
+ *
+ * For an input stream, this field is guaranteed to be a 0-length string.
+ */
+ const char* physical_camera_id;
+
+ /**
+ * This should be one of the camera3_stream_rotation_t values except for
+ * CAMERA3_STREAM_ROTATION_180.
+ * When set to CAMERA3_STREAM_ROTATION_90 or CAMERA3_STREAM_ROTATION_270, the HAL would crop,
+ * rotate the frame by the specified degrees clockwise and scale it up to the original size.
+ * In Chrome OS, it's possible to have a portrait activity run in a landscape screen with a
+ * landscape-mounted camera. The activity would show stretched or rotated preview because it
+ * does not expect to receive landscape preview frames. To solve this problem, we ask the HAL
+ * to crop, rotate and scale the frames and modify CameraCharacteristics.SENSOR_ORIENTATION
+ * accordingly to imitate a portrait camera.
+ * Setting it to CAMERA3_STREAM_ROTATION_0 means no crop-rotate-scale would be performed.
+ * |cros_rotate_scale_degrees| in all camera3_stream_t of a configure_streams() call must be
+ * identical. The HAL should return -EINVAL if the degrees are not the same for all the streams.
+ */
+ int crop_rotate_scale_degrees;
+
+ /* reserved for future use */
+ void *reserved[5];
+
+} camera3_stream_t;
+
+/**
+ * camera3_stream_configuration_t:
+ *
+ * A structure of stream definitions, used by configure_streams(). This
+ * structure defines all the output streams and the reprocessing input
+ * stream for the current camera use case.
+ */
+typedef struct camera3_stream_configuration {
+ /**
+ * The total number of streams requested by the framework. This includes
+ * both input and output streams. The number of streams will be at least 1,
+ * and there will be at least one output-capable stream.
+ */
+ uint32_t num_streams;
+
+ /**
+ * An array of camera stream pointers, defining the input/output
+ * configuration for the camera HAL device.
+ *
+ * At most one input-capable stream may be defined (INPUT or BIDIRECTIONAL)
+ * in a single configuration.
+ *
+ * At least one output-capable stream must be defined (OUTPUT or
+ * BIDIRECTIONAL).
+ */
+ camera3_stream_t **streams;
+
+ /**
+ * >= CAMERA_DEVICE_API_VERSION_3_3:
+ *
+ * The operation mode of streams in this configuration, one of the values
+ * defined in camera3_stream_configuration_mode_t. The HAL can use this
+ * mode as an indicator to set the stream property (e.g.,
+ * camera3_stream->max_buffers) appropriately.
For example, if the
+ * configuration is
+ * CAMERA3_STREAM_CONFIGURATION_CONSTRAINED_HIGH_SPEED_MODE, the HAL may
+ * want to set aside more buffers for batch mode operation (see
+ * android.control.availableHighSpeedVideoConfigurations for batch mode
+ * definition).
+ *
+ */
+ uint32_t operation_mode;
+
+ /**
+ * >= CAMERA_DEVICE_API_VERSION_3_5:
+ *
+ * The session metadata buffer contains the initial values of
+ * ANDROID_REQUEST_AVAILABLE_SESSION_KEYS. This field is optional
+ * and camera clients can choose to ignore it, in which case it will
+ * be set to NULL. If parameters are present, then the HAL should examine
+ * the parameter values and configure its internal camera pipeline
+ * accordingly.
+ */
+ const camera_metadata_t *session_parameters;
+} camera3_stream_configuration_t;
+
+/**
+ * camera3_buffer_status_t:
+ *
+ * The current status of a single stream buffer.
+ */
+typedef enum camera3_buffer_status {
+ /**
+ * The buffer is in a normal state, and can be used after waiting on its
+ * sync fence.
+ */
+ CAMERA3_BUFFER_STATUS_OK = 0,
+
+ /**
+ * The buffer does not contain valid data, and the data in it should not be
+ * used. The sync fence must still be waited on before reusing the buffer.
+ */
+ CAMERA3_BUFFER_STATUS_ERROR = 1
+
+} camera3_buffer_status_t;
+
+/**
+ * camera3_stream_buffer_t:
+ *
+ * A single buffer from a camera3 stream. It includes a handle to its parent
+ * stream, the handle to the gralloc buffer itself, and sync fences.
+ *
+ * The buffer does not specify whether it is to be used for input or output;
+ * that is determined by its parent stream type and how the buffer is passed to
+ * the HAL device.
+ */
+typedef struct camera3_stream_buffer {
+ /**
+ * The handle of the stream this buffer is associated with
+ */
+ camera3_stream_t *stream;
+
+ /**
+ * The native handle to the buffer
+ */
+ buffer_handle_t *buffer;
+
+ /**
+ * Current state of the buffer, one of the camera3_buffer_status_t
+ * values. The framework will not pass buffers to the HAL that are in an
+ * error state. In case a buffer could not be filled by the HAL, it must
+ * have its status set to CAMERA3_BUFFER_STATUS_ERROR when returned to the
+ * framework with process_capture_result().
+ */
+ int status;
+
+ /**
+ * The acquire sync fence for this buffer. The HAL must wait on this fence
+ * fd before attempting to read from or write to this buffer.
+ *
+ * It may be set to -1 by the framework to indicate that no waiting is
+ * necessary for this buffer.
+ *
+ * When the HAL returns an output buffer to the framework with
+ * process_capture_result(), the acquire_fence must be set to -1. If the HAL
+ * never waits on the acquire_fence due to an error in filling a buffer,
+ * when calling process_capture_result() the HAL must set the release_fence
+ * of the buffer to be the acquire_fence passed to it by the framework. This
+ * will allow the framework to wait on the fence before reusing the buffer.
+ *
+ * For input buffers, the HAL must not change the acquire_fence field during
+ * the process_capture_request() call.
+ *
+ * >= CAMERA_DEVICE_API_VERSION_3_2:
+ *
+ * When the HAL returns an input buffer to the framework with
+ * process_capture_result(), the acquire_fence must be set to -1. If the HAL
+ * never waits on the input buffer acquire fence due to an error, the sync
+ * fences should be handled similarly to the way they are handled for output
+ * buffers.
+ */
+ int acquire_fence;
+
+ /**
+ * The release sync fence for this buffer.
The HAL must set this fence when + * returning buffers to the framework, or write -1 to indicate that no + * waiting is required for this buffer. + * + * For the output buffers, the fences must be set in the output_buffers + * array passed to process_capture_result(). + * + * <= CAMERA_DEVICE_API_VERSION_3_1: + * + * For the input buffer, the release fence must be set by the + * process_capture_request() call. + * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * For the input buffer, the fences must be set in the input_buffer + * passed to process_capture_result(). + * + * After signaling the release_fence for this buffer, the HAL + * should not make any further attempts to access this buffer as the + * ownership has been fully transferred back to the framework. + * + * If a fence of -1 was specified then the ownership of this buffer + * is transferred back immediately upon the call of process_capture_result. + */ + int release_fence; + +} camera3_stream_buffer_t; + +/** + * camera3_stream_buffer_set_t: + * + * The complete set of gralloc buffers for a stream. This structure is given to + * register_stream_buffers() to allow the camera HAL device to register/map/etc + * newly allocated stream buffers. + * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * Deprecated (and not used). In particular, + * register_stream_buffers is also deprecated and will never be invoked. + * + */ +typedef struct camera3_stream_buffer_set { + /** + * The stream handle for the stream these buffers belong to + */ + camera3_stream_t *stream; + + /** + * The number of buffers in this stream. It is guaranteed to be at least + * stream->max_buffers. + */ + uint32_t num_buffers; + + /** + * The array of gralloc buffer handles for this stream. If the stream format + * is set to HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, the camera HAL device + * should inspect the passed-in buffers to determine any platform-private + * pixel format information. + */ + buffer_handle_t **buffers; + +} camera3_stream_buffer_set_t; + +/** + * camera3_jpeg_blob: + * + * Transport header for compressed JPEG buffers in output streams. + * + * To capture JPEG images, a stream is created using the pixel format + * HAL_PIXEL_FORMAT_BLOB. The buffer size for the stream is calculated by the + * framework, based on the static metadata field android.jpeg.maxSize. Since + * compressed JPEG images are of variable size, the HAL needs to include the + * final size of the compressed image using this structure inside the output + * stream buffer. The JPEG blob ID field must be set to CAMERA3_JPEG_BLOB_ID. + * + * Transport header should be at the end of the JPEG output stream buffer. That + * means the jpeg_blob_id must start at byte[buffer_size - + * sizeof(camera3_jpeg_blob)], where the buffer_size is the size of gralloc buffer. + * Any HAL using this transport header must account for it in android.jpeg.maxSize + * The JPEG data itself starts at the beginning of the buffer and should be + * jpeg_size bytes long. + */ +typedef struct camera3_jpeg_blob { + uint16_t jpeg_blob_id; + uint32_t jpeg_size; +} camera3_jpeg_blob_t; + +enum { + CAMERA3_JPEG_BLOB_ID = 0x00FF +}; + +/********************************************************************** + * + * Message definitions for the HAL notify() callback. + * + * These definitions are used for the HAL notify callback, to signal + * asynchronous events from the HAL device to the Android framework. 
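+ *
+ * As a non-normative illustration (the callback-ops pointer 'cb' and the
+ * timestamp variable are assumptions of this sketch), a HAL might signal the
+ * start of exposure for a frame like so:
+ *
+ *   camera3_notify_msg_t msg = {
+ *       .type = CAMERA3_MSG_SHUTTER,
+ *       .message.shutter = {
+ *           .frame_number = frame_number,
+ *           .timestamp = start_of_exposure_ns, // sensor exposure start time
+ *       },
+ *   };
+ *   cb->notify(cb, &msg); // cb: the camera3_callback_ops_t from initialize()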
+ *
+ */
+
+/**
+ * camera3_msg_type:
+ *
+ * Indicates the type of message sent, which specifies which member of the
+ * message union is valid.
+ *
+ */
+typedef enum camera3_msg_type {
+ /**
+ * An error has occurred. camera3_notify_msg.message.error contains the
+ * error information.
+ */
+ CAMERA3_MSG_ERROR = 1,
+
+ /**
+ * The exposure of a given request or processing a reprocess request has
+ * begun. camera3_notify_msg.message.shutter contains the information
+ * about the capture.
+ */
+ CAMERA3_MSG_SHUTTER = 2,
+
+ /**
+ * Number of framework message types
+ */
+ CAMERA3_NUM_MESSAGES
+
+} camera3_msg_type_t;
+
+/**
+ * Defined error codes for CAMERA_MSG_ERROR
+ */
+typedef enum camera3_error_msg_code {
+ /**
+ * A serious failure occurred. No further frames or buffer streams will
+ * be produced by the device. Device should be treated as closed. The
+ * client must reopen the device to use it again. The frame_number field
+ * is unused.
+ */
+ CAMERA3_MSG_ERROR_DEVICE = 1,
+
+ /**
+ * An error has occurred in processing a request. No output (metadata or
+ * buffers) will be produced for this request. The frame_number field
+ * specifies which request has been dropped. Subsequent requests are
+ * unaffected, and the device remains operational.
+ */
+ CAMERA3_MSG_ERROR_REQUEST = 2,
+
+ /**
+ * An error has occurred in producing an output result metadata buffer
+ * for a request, but output stream buffers for it will still be
+ * available. Subsequent requests are unaffected, and the device remains
+ * operational. The frame_number field specifies the request for which
+ * result metadata won't be available.
+ */
+ CAMERA3_MSG_ERROR_RESULT = 3,
+
+ /**
+ * An error has occurred in placing an output buffer into a stream for a
+ * request. The frame metadata and other buffers may still be
+ * available. Subsequent requests are unaffected, and the device remains
+ * operational. The frame_number field specifies the request for which the
+ * buffer was dropped, and error_stream contains a pointer to the stream
+ * that dropped the frame.
+ */
+ CAMERA3_MSG_ERROR_BUFFER = 4,
+
+ /**
+ * Number of error types
+ */
+ CAMERA3_MSG_NUM_ERRORS
+
+} camera3_error_msg_code_t;
+
+/**
+ * camera3_error_msg_t:
+ *
+ * Message contents for CAMERA3_MSG_ERROR
+ */
+typedef struct camera3_error_msg {
+ /**
+ * Frame number of the request the error applies to. 0 if the frame number
+ * isn't applicable to the error.
+ */
+ uint32_t frame_number;
+
+ /**
+ * Pointer to the stream that had a failure. NULL if the stream isn't
+ * applicable to the error.
+ */
+ camera3_stream_t *error_stream;
+
+ /**
+ * The code for this error; one of the CAMERA_MSG_ERROR enum values.
+ */
+ int error_code;
+
+} camera3_error_msg_t;
+
+/**
+ * camera3_shutter_msg_t:
+ *
+ * Message contents for CAMERA3_MSG_SHUTTER
+ */
+typedef struct camera3_shutter_msg {
+ /**
+ * Frame number of the request that has begun exposure or reprocessing.
+ */
+ uint32_t frame_number;
+
+ /**
+ * Timestamp for the start of capture. For a reprocess request, this must
+ * be input image's start of capture. This must match the capture result
+ * metadata's sensor exposure start timestamp.
+ */
+ uint64_t timestamp;
+
+} camera3_shutter_msg_t;
+
+/**
+ * camera3_notify_msg_t:
+ *
+ * The message structure sent to camera3_callback_ops_t.notify()
+ */
+typedef struct camera3_notify_msg {
+
+ /**
+ * The message type. One of the camera3_msg_type_t values, or a private
+ * extension.
+ */
+ int type;
+
+ union {
+ /**
+ * Error message contents.
Valid if type is CAMERA3_MSG_ERROR + */ + camera3_error_msg_t error; + + /** + * Shutter message contents. Valid if type is CAMERA3_MSG_SHUTTER + */ + camera3_shutter_msg_t shutter; + + /** + * Generic message contents. Used to ensure a minimum size for custom + * message types. + */ + uint8_t generic[32]; + } message; + +} camera3_notify_msg_t; + +/********************************************************************** + * + * Capture request/result definitions for the HAL process_capture_request() + * method, and the process_capture_result() callback. + * + */ + +/** + * camera3_request_template_t: + * + * Available template types for + * camera3_device_ops.construct_default_request_settings() + */ +typedef enum camera3_request_template { + /** + * Standard camera preview operation with 3A on auto. + */ + CAMERA3_TEMPLATE_PREVIEW = 1, + + /** + * Standard camera high-quality still capture with 3A and flash on auto. + */ + CAMERA3_TEMPLATE_STILL_CAPTURE = 2, + + /** + * Standard video recording plus preview with 3A on auto, torch off. + */ + CAMERA3_TEMPLATE_VIDEO_RECORD = 3, + + /** + * High-quality still capture while recording video. Application will + * include preview, video record, and full-resolution YUV or JPEG streams in + * request. Must not cause stuttering on video stream. 3A on auto. + */ + CAMERA3_TEMPLATE_VIDEO_SNAPSHOT = 4, + + /** + * Zero-shutter-lag mode. Application will request preview and + * full-resolution data for each frame, and reprocess it to JPEG when a + * still image is requested by user. Settings should provide highest-quality + * full-resolution images without compromising preview frame rate. 3A on + * auto. + */ + CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG = 5, + + /** + * A basic template for direct application control of capture + * parameters. All automatic control is disabled (auto-exposure, auto-white + * balance, auto-focus), and post-processing parameters are set to preview + * quality. The manual capture parameters (exposure, sensitivity, etc.) + * are set to reasonable defaults, but should be overridden by the + * application depending on the intended use case. + */ + CAMERA3_TEMPLATE_MANUAL = 6, + + /* Total number of templates */ + CAMERA3_TEMPLATE_COUNT, + + /** + * First value for vendor-defined request templates + */ + CAMERA3_VENDOR_TEMPLATE_START = 0x40000000 + +} camera3_request_template_t; + +/** + * camera3_capture_request_t: + * + * A single request for image capture/buffer reprocessing, sent to the Camera + * HAL device by the framework in process_capture_request(). + * + * The request contains the settings to be used for this capture, and the set of + * output buffers to write the resulting image data in. It may optionally + * contain an input buffer, in which case the request is for reprocessing that + * input buffer instead of capturing a new image with the camera sensor. The + * capture is identified by the frame_number. + * + * In response, the camera HAL device must send a camera3_capture_result + * structure asynchronously to the framework, using the process_capture_result() + * callback. + */ +typedef struct camera3_capture_request { + /** + * The frame number is an incrementing integer set by the framework to + * uniquely identify this capture. It needs to be returned in the result + * call, and is also used to identify the request in asynchronous + * notifications sent to camera3_callback_ops_t.notify(). + */ + uint32_t frame_number; + + /** + * The settings buffer contains the capture and processing parameters for + * the request. 
As a special case, a NULL settings buffer indicates that the + * settings are identical to the most-recently submitted capture request. A + * NULL buffer cannot be used as the first submitted request after a + * configure_streams() call. + */ + const camera_metadata_t *settings; + + /** + * The input stream buffer to use for this request, if any. + * + * If input_buffer is NULL, then the request is for a new capture from the + * imager. If input_buffer is valid, the request is for reprocessing the + * image contained in input_buffer. + * + * In the latter case, the HAL must set the release_fence of the + * input_buffer to a valid sync fence, or to -1 if the HAL does not support + * sync, before process_capture_request() returns. + * + * The HAL is required to wait on the acquire sync fence of the input buffer + * before accessing it. + * + * <= CAMERA_DEVICE_API_VERSION_3_1: + * + * Any input buffer included here will have been registered with the HAL + * through register_stream_buffers() before its inclusion in a request. + * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * The buffers will not have been pre-registered with the HAL. + * Subsequent requests may reuse buffers, or provide entirely new buffers. + */ + camera3_stream_buffer_t *input_buffer; + + /** + * The number of output buffers for this capture request. Must be at least + * 1. + */ + uint32_t num_output_buffers; + + /** + * An array of num_output_buffers stream buffers, to be filled with image + * data from this capture/reprocess. The HAL must wait on the acquire fences + * of each stream buffer before writing to them. + * + * The HAL takes ownership of the actual buffer_handle_t entries in + * output_buffers; the framework does not access them until they are + * returned in a camera3_capture_result_t. + * + * <= CAMERA_DEVICE_API_VERSION_3_1: + * + * All the buffers included here will have been registered with the HAL + * through register_stream_buffers() before their inclusion in a request. + * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * Any or all of the buffers included here may be brand new in this + * request (having never before been seen by the HAL). + */ + const camera3_stream_buffer_t *output_buffers; + + /** + * <= CAMERA_DEVICE_API_VERSION_3_4: + * + * Not defined and must not be accessed. + * + * >= CAMERA_DEVICE_API_VERSION_3_5: + * The number of physical camera settings to be applied. If 'num_physcam_settings' + * equals 0 or a physical device is not included, then the HAL must decide the + * specific physical device settings based on the default 'settings'. + */ + uint32_t num_physcam_settings; + + /** + * <= CAMERA_DEVICE_API_VERSION_3_4: + * + * Not defined and must not be accessed. + * + * >= CAMERA_DEVICE_API_VERSION_3_5: + * The physical camera ids. The array will contain 'num_physcam_settings' + * camera id strings for all physical devices that have specific settings. + * In case some id is invalid, the process capture request must fail and return + * -EINVAL. + */ + const char **physcam_id; + + /** + * <= CAMERA_DEVICE_API_VERSION_3_4: + * + * Not defined and must not be accessed. + * + * >= CAMERA_DEVICE_API_VERSION_3_5: + * The capture settings for the physical cameras. The array will contain + * 'num_physcam_settings' settings for individual physical devices. In + * case the settings at some particular index are empty, the process capture + * request must fail and return -EINVAL.
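+ *
+ * Illustrative sketch (not part of the original header; the ids "2" and
+ * "3" and the settings_2/settings_3 buffers are hypothetical): a
+ * framework-side caller populating the physical camera fields of a
+ * request for a logical multi-camera might do:
+ *
+ *     const char *ids[2] = { "2", "3" };
+ *     const camera_metadata_t *phys[2] = { settings_2, settings_3 };
+ *     request.num_physcam_settings = 2;
+ *     request.physcam_id = ids;
+ *     request.physcam_settings = phys;  // one non-empty buffer per id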
+ */ + const camera_metadata_t **physcam_settings; + +} camera3_capture_request_t; + +/** + * camera3_capture_result_t: + * + * The result of a single capture/reprocess by the camera HAL device. This is + * sent to the framework asynchronously with process_capture_result(), in + * response to a single capture request sent to the HAL with + * process_capture_request(). Multiple process_capture_result() calls may be + * performed by the HAL for each request. + * + * Each such call carries the same frame + * number, and may contain some subset of the output buffers and/or the result + * metadata. The metadata may only be provided once for a given frame number; + * all other calls must set the result metadata to NULL. + * + * The result structure contains the output metadata from this capture, and the + * set of output buffers that have been/will be filled for this capture. Each + * output buffer may come with a release sync fence that the framework will wait + * on before reading, in case the buffer has not yet been filled by the HAL. + * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * The metadata may be provided multiple times for a single frame number. The + * framework will accumulate the final result set by combining each + * partial result into the total result set. + * + * If an input buffer is given in a request, the HAL must return it in one of + * the process_capture_result calls, and that call may return just the input + * buffer, without metadata or output buffers; the sync fences must be handled + * the same way they are for output buffers. + * + * Performance considerations: + * + * Applications will also receive these partial results immediately, so sending + * partial results is a highly recommended performance optimization: results + * that are known very early on in the pipeline can be delivered without first + * waiting out the total pipeline latency. + * + * A typical use case might be calculating the AF state halfway through the + * pipeline; by sending the state back to the framework immediately, we get a + * 50% improvement in the perceived responsiveness of the auto-focus. + * + */ +typedef struct camera3_capture_result { + /** + * The frame number is an incrementing integer set by the framework in the + * submitted request to uniquely identify this capture. It is also used to + * identify the request in asynchronous notifications sent to + * camera3_callback_ops_t.notify(). + */ + uint32_t frame_number; + + /** + * The result metadata for this capture. This contains information about the + * final capture parameters, the state of the capture and post-processing + * hardware, the state of the 3A algorithms, if enabled, and the output of + * any enabled statistics units. + * + * Only one call to process_capture_result() with a given frame_number may + * include the result metadata. All other calls for the same frame_number + * must set this to NULL. + * + * If there was an error producing the result metadata, result must be an + * empty metadata buffer, and notify() must be called with ERROR_RESULT. + * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * Multiple calls to process_capture_result() with a given frame_number + * may include the result metadata. + * + * Partial metadata submitted should not include any metadata key returned + * in a previous partial result for a given frame. Each new partial result + * for that frame must also set a distinct partial_result value.
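+ *
+ * For example (a hedged sketch, not from the original header; 'cb' is an
+ * assumed camera3_callback_ops_t pointer and 'metadata_3a_only' an
+ * assumed metadata buffer), a HAL advertising
+ * android.request.partialResultCount == 2 could send early 3A metadata
+ * first, then the remaining keys in a second partial result:
+ *
+ *     camera3_capture_result_t partial = { 0 };
+ *     partial.frame_number = frame_number;
+ *     partial.result = metadata_3a_only;  // early subset of result keys
+ *     partial.partial_result = 1;         // the final call would use 2
+ *     cb->process_capture_result(cb, &partial);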
+ * + * If notify has been called with ERROR_RESULT, all further partial + * results for that frame are ignored by the framework. + */ + const camera_metadata_t *result; + + /** + * The number of output buffers returned in this result structure. Must be + * less than or equal to the matching capture request's count. If this is + * less than the buffer count in the capture request, at least one more call + * to process_capture_result with the same frame_number must be made, to + * return the remaining output buffers to the framework. This may only be + * zero if the structure includes valid result metadata or an input buffer + * is returned in this result. + */ + uint32_t num_output_buffers; + + /** + * The handles for the output stream buffers for this capture. They may not + * yet be filled at the time the HAL calls process_capture_result(); the + * framework will wait on the release sync fences provided by the HAL before + * reading the buffers. + * + * The HAL must set the stream buffer's release sync fence to a valid sync + * fd, or to -1 if the buffer has already been filled. + * + * If the HAL encounters an error while processing the buffer, and the + * buffer is not filled, the buffer's status field must be set to + * CAMERA3_BUFFER_STATUS_ERROR. If the HAL did not wait on the acquire fence + * before encountering the error, the acquire fence should be copied into + * the release fence, to allow the framework to wait on the fence before + * reusing the buffer. + * + * The acquire fence must be set to -1 for all output buffers. If + * num_output_buffers is zero, this may be NULL. In that case, at least one + * more process_capture_result call must be made by the HAL to provide the + * output buffers. + * + * When process_capture_result is called with a new buffer for a frame, + * all previous frames' buffers for that corresponding stream must have been + * already delivered (the fences need not have yet been signaled). + * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * Gralloc buffers for a frame may be sent to the framework before the + * corresponding SHUTTER-notify. + * + * Performance considerations: + * + * Buffers delivered to the framework will not be dispatched to the + * application layer until a start of exposure timestamp has been received + * via a SHUTTER notify() call. It is highly recommended to + * dispatch that call as early as possible. + */ + const camera3_stream_buffer_t *output_buffers; + + /** + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * The handle for the input stream buffer for this capture. It may not + * yet be consumed at the time the HAL calls process_capture_result(); the + * framework will wait on the release sync fences provided by the HAL before + * reusing the buffer. + * + * The HAL should handle the sync fences the same way they are done for + * output_buffers. + * + * Only one input buffer is allowed to be sent per request. Similarly to + * output buffers, the ordering of returned input buffers must be + * maintained by the HAL. + * + * Performance considerations: + * + * The input buffer should be returned as early as possible. If the HAL + * supports sync fences, it can call process_capture_result to hand it back + * with sync fences being set appropriately. If the sync fences are not + * supported, the buffer can only be returned when it is consumed, which + * may take a long time; the HAL may choose to copy this input buffer to make + * the buffer return sooner.
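+ *
+ * A minimal sketch (illustrative only; 'cb', 'frame_number' and
+ * 'consumed_input' are assumptions) of handing the input buffer back in
+ * a buffer-only result:
+ *
+ *     camera3_capture_result_t r = { 0 };
+ *     r.frame_number = frame_number;
+ *     r.input_buffer = &consumed_input;  // release_fence set, or -1
+ *     r.partial_result = 0;              // buffers only, no metadata
+ *     cb->process_capture_result(cb, &r);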
+ */ + const camera3_stream_buffer_t *input_buffer; + + /** + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * In order to take advantage of partial results, the HAL must set the + * static metadata android.request.partialResultCount to the number of + * partial results it will send for each frame. + * + * Each new capture result with a partial result must set + * this field (partial_result) to a distinct inclusive value between + * 1 and android.request.partialResultCount. + * + * HALs not wishing to take advantage of this feature must not + * set android.request.partialResultCount, and must not set partial_result + * to a value other than 1. + * + * This value must be set to 0 when a capture result contains buffers only + * and no metadata. + */ + uint32_t partial_result; + + /** + * >= CAMERA_DEVICE_API_VERSION_3_5: + * + * Specifies the number of physical camera metadata this capture result + * contains. It must be equal to the number of physical cameras being + * requested from. + * + * If the current camera device is not a logical multi-camera, or the + * corresponding capture_request doesn't request on any physical camera, + * this field must be 0. + */ + uint32_t num_physcam_metadata; + + /** + * >= CAMERA_DEVICE_API_VERSION_3_5: + * + * An array of strings containing the physical camera ids for the returned + * physical camera metadata. The length of the array is + * num_physcam_metadata. + */ + const char **physcam_ids; + + /** + * >= CAMERA_DEVICE_API_VERSION_3_5: + * + * The array of physical camera metadata for the physical cameras being + * requested upon. This array should have a 1-to-1 mapping with the + * physcam_ids. The length of the array is num_physcam_metadata. + */ + const camera_metadata_t **physcam_metadata; + +} camera3_capture_result_t; + +/********************************************************************** + * + * Callback methods for the HAL to call into the framework. + * + * These methods are used to return metadata and image buffers for completed + * or failed captures, and to notify the framework of asynchronous events such + * as errors. + * + * The framework will not call back into the HAL from within these callbacks, + * and these calls will not block for extended periods. + * + */ +typedef struct camera3_callback_ops { + + /** + * process_capture_result: + * + * Send results from a completed capture to the framework. + * process_capture_result() may be invoked multiple times by the HAL in + * response to a single capture request. This allows, for example, the + * metadata and low-resolution buffers to be returned in one call, and + * post-processed JPEG buffers in a later call, once they are available. Each + * call must include the frame number of the request it is returning + * metadata or buffers for. + * + * A component (buffer or metadata) of the complete result may only be + * included in one process_capture_result call. A buffer for each stream, + * and the result metadata, must be returned by the HAL for each request in + * one of the process_capture_result calls, even in case of errors producing + * some of the output. A call to process_capture_result() with neither + * output buffers nor result metadata is not allowed. + * + * The order of returning metadata and buffers for a single result does not + * matter, but buffers for a given stream must be returned in FIFO order. So + * the buffer for request 5 for stream A must always be returned before the + * buffer for request 6 for stream A.
This also applies to the result + * metadata; the metadata for request 5 must be returned before the metadata + * for request 6. + * + * However, different streams are independent of each other, so it is + * acceptable and expected that the buffer for request 5 for stream A may be + * returned after the buffer for request 6 for stream B is. And it is + * acceptable that the result metadata for request 6 for stream B is + * returned before the buffer for request 5 for stream A is. + * + * The HAL retains ownership of the result structure, which only needs to be + * valid to access during this call. The framework will copy whatever it + * needs before this call returns. + * + * The output buffers do not need to be filled yet; the framework will wait + * on the stream buffer release sync fence before reading the buffer + * data. Therefore, this method should be called by the HAL as soon as + * possible, even if some or all of the output buffers are still + * being filled. The HAL must include valid release sync fences in each + * output_buffers stream buffer entry, or -1 if that stream buffer is + * already filled. + * + * If the result buffer cannot be constructed for a request, the HAL should + * return an empty metadata buffer, but still provide the output buffers and + * their sync fences. In addition, notify() must be called with an + * ERROR_RESULT message. + * + * If an output buffer cannot be filled, its status field must be set to + * STATUS_ERROR. In addition, notify() must be called with an ERROR_BUFFER + * message. + * + * If the entire capture has failed, then this method still needs to be + * called to return the output buffers to the framework. All the buffer + * statuses should be STATUS_ERROR, and the result metadata should be an + * empty buffer. In addition, notify() must be called with an ERROR_REQUEST + * message. In this case, individual ERROR_RESULT/ERROR_BUFFER messages + * should not be sent. + * + * Performance requirements: + * + * This is a non-blocking call. The framework will return from this call in 5ms. + * + * The pipeline latency (see S7 for definition) should be less than or equal to + * 4 frame intervals, and must be less than or equal to 8 frame intervals. + * + */ + void (*process_capture_result)(const struct camera3_callback_ops *, + const camera3_capture_result_t *result); + + /** + * notify: + * + * Asynchronous notification callback from the HAL, fired for various + * reasons. Only for information independent of frame capture, or that + * requires specific timing. The ownership of the message structure remains + * with the HAL, and the msg only needs to be valid for the duration of this + * call. + * + * Multiple threads may call notify() simultaneously. + * + * <= CAMERA_DEVICE_API_VERSION_3_1: + * + * The notification for the start of exposure for a given request must be + * sent by the HAL before the first call to process_capture_result() for + * that request is made. + * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * Buffers delivered to the framework will not be dispatched to the + * application layer until a start of exposure timestamp (or input image's + * start of exposure timestamp for a reprocess request) has been received + * via a SHUTTER notify() call. It is highly recommended to dispatch this + * call as early as possible. + * + * ------------------------------------------------------------------------ + * Performance requirements: + * + * This is a non-blocking call. The framework will return from this call in 5ms.
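+ *
+ * A hedged sketch (not part of the original header; 'cb', 'frame_number'
+ * and the timestamp source are assumptions) of a HAL issuing the SHUTTER
+ * notification for a frame:
+ *
+ *     camera3_notify_msg_t msg;
+ *     msg.type = CAMERA3_MSG_SHUTTER;
+ *     msg.message.shutter.frame_number = frame_number;
+ *     msg.message.shutter.timestamp = start_of_exposure_ns;
+ *     cb->notify(cb, &msg);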
+ */ + void (*notify)(const struct camera3_callback_ops *, + const camera3_notify_msg_t *msg); + +} camera3_callback_ops_t; + +/********************************************************************** + * + * Camera device operations + * + */ +typedef struct camera3_device_ops { + + /** + * initialize: + * + * One-time initialization to pass framework callback function pointers to + * the HAL. Will be called once after a successful open() call, before any + * other functions are called on the camera3_device_ops structure. + * + * Performance requirements: + * + * This should be a non-blocking call. The HAL should return from this call + * in 5ms, and must return from this call in 10ms. + * + * Return values: + * + * 0: On successful initialization + * + * -ENODEV: If initialization fails. Only close() can be called successfully + * by the framework after this. + */ + int (*initialize)(const struct camera3_device *, + const camera3_callback_ops_t *callback_ops); + + /********************************************************************** + * Stream management + */ + + /** + * configure_streams: + * + * CAMERA_DEVICE_API_VERSION_3_0 only: + * + * Reset the HAL camera device processing pipeline and set up new input and + * output streams. This call replaces any existing stream configuration with + * the streams defined in the stream_list. This method will be called at + * least once after initialize() before a request is submitted with + * process_capture_request(). + * + * The stream_list must contain at least one output-capable stream, and may + * not contain more than one input-capable stream. + * + * The stream_list may contain streams that are also in the currently-active + * set of streams (from the previous call to configure_stream()). These + * streams will already have valid values for usage, max_buffers, and the + * private pointer. + * + * If such a stream has already had its buffers registered, + * register_stream_buffers() will not be called again for the stream, and + * buffers from the stream can be immediately included in input requests. + * + * If the HAL needs to change the stream configuration for an existing + * stream due to the new configuration, it may rewrite the values of usage + * and/or max_buffers during the configure call. + * + * The framework will detect such a change, and will then reallocate the + * stream buffers, and call register_stream_buffers() again before using + * buffers from that stream in a request. + * + * If a currently-active stream is not included in stream_list, the HAL may + * safely remove any references to that stream. It will not be reused in a + * later configure() call by the framework, and all the gralloc buffers for + * it will be freed after the configure_streams() call returns. + * + * The stream_list structure is owned by the framework, and may not be + * accessed once this call completes. The address of an individual + * camera3_stream_t structure will remain valid for access by the HAL until + * the end of the first configure_stream() call which no longer includes + * that camera3_stream_t in the stream_list argument. The HAL may not change + * values in the stream structure outside of the private pointer, except for + * the usage and max_buffers members during the configure_streams() call + * itself. + * + * If the stream is new, the usage, max_buffer, and private pointer fields + * of the stream structure will all be set to 0. The HAL device must set + * these fields before the configure_streams() call returns. 
These fields + * are then used by the framework and the platform gralloc module to + * allocate the gralloc buffers for each stream. + * + * Before such a new stream can have its buffers included in a capture + * request, the framework will call register_stream_buffers() with that + * stream. However, the framework is not required to register buffers for + * _all_ streams before submitting a request. This allows for quick startup + * of (for example) a preview stream, with allocation for other streams + * happening later or concurrently. + * + * ------------------------------------------------------------------------ + * CAMERA_DEVICE_API_VERSION_3_1 only: + * + * Reset the HAL camera device processing pipeline and set up new input and + * output streams. This call replaces any existing stream configuration with + * the streams defined in the stream_list. This method will be called at + * least once after initialize() before a request is submitted with + * process_capture_request(). + * + * The stream_list must contain at least one output-capable stream, and may + * not contain more than one input-capable stream. + * + * The stream_list may contain streams that are also in the currently-active + * set of streams (from the previous call to configure_stream()). These + * streams will already have valid values for usage, max_buffers, and the + * private pointer. + * + * If such a stream has already had its buffers registered, + * register_stream_buffers() will not be called again for the stream, and + * buffers from the stream can be immediately included in input requests. + * + * If the HAL needs to change the stream configuration for an existing + * stream due to the new configuration, it may rewrite the values of usage + * and/or max_buffers during the configure call. + * + * The framework will detect such a change, and will then reallocate the + * stream buffers, and call register_stream_buffers() again before using + * buffers from that stream in a request. + * + * If a currently-active stream is not included in stream_list, the HAL may + * safely remove any references to that stream. It will not be reused in a + * later configure() call by the framework, and all the gralloc buffers for + * it will be freed after the configure_streams() call returns. + * + * The stream_list structure is owned by the framework, and may not be + * accessed once this call completes. The address of an individual + * camera3_stream_t structure will remain valid for access by the HAL until + * the end of the first configure_stream() call which no longer includes + * that camera3_stream_t in the stream_list argument. The HAL may not change + * values in the stream structure outside of the private pointer, except for + * the usage and max_buffers members during the configure_streams() call + * itself. + * + * If the stream is new, max_buffer, and private pointer fields of the + * stream structure will all be set to 0. The usage will be set to the + * consumer usage flags. The HAL device must set these fields before the + * configure_streams() call returns. These fields are then used by the + * framework and the platform gralloc module to allocate the gralloc + * buffers for each stream. + * + * Before such a new stream can have its buffers included in a capture + * request, the framework will call register_stream_buffers() with that + * stream. However, the framework is not required to register buffers for + * _all_ streams before submitting a request. 
This allows for quick startup + * of (for example) a preview stream, with allocation for other streams + * happening later or concurrently. + * + * ------------------------------------------------------------------------ + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * Reset the HAL camera device processing pipeline and set up new input and + * output streams. This call replaces any existing stream configuration with + * the streams defined in the stream_list. This method will be called at + * least once after initialize() before a request is submitted with + * process_capture_request(). + * + * The stream_list must contain at least one output-capable stream, and may + * not contain more than one input-capable stream. + * + * The stream_list may contain streams that are also in the currently-active + * set of streams (from the previous call to configure_stream()). These + * streams will already have valid values for usage, max_buffers, and the + * private pointer. + * + * If the HAL needs to change the stream configuration for an existing + * stream due to the new configuration, it may rewrite the values of usage + * and/or max_buffers during the configure call. + * + * The framework will detect such a change, and may then reallocate the + * stream buffers before using buffers from that stream in a request. + * + * If a currently-active stream is not included in stream_list, the HAL may + * safely remove any references to that stream. It will not be reused in a + * later configure() call by the framework, and all the gralloc buffers for + * it will be freed after the configure_streams() call returns. + * + * The stream_list structure is owned by the framework, and may not be + * accessed once this call completes. The address of an individual + * camera3_stream_t structure will remain valid for access by the HAL until + * the end of the first configure_stream() call which no longer includes + * that camera3_stream_t in the stream_list argument. The HAL may not change + * values in the stream structure outside of the private pointer, except for + * the usage and max_buffers members during the configure_streams() call + * itself. + * + * If the stream is new, max_buffer, and private pointer fields of the + * stream structure will all be set to 0. The usage will be set to the + * consumer usage flags. The HAL device must set these fields before the + * configure_streams() call returns. These fields are then used by the + * framework and the platform gralloc module to allocate the gralloc + * buffers for each stream. + * + * Newly allocated buffers may be included in a capture request at any time + * by the framework. Once a gralloc buffer is returned to the framework + * with process_capture_result (and its respective release_fence has been + * signaled) the framework may free or reuse it at any time. + * + * ------------------------------------------------------------------------ + * + * Preconditions: + * + * The framework will only call this method when no captures are being + * processed. That is, all results have been returned to the framework, and + * all in-flight input and output buffers have been returned and their + * release sync fences have been signaled by the HAL. The framework will not + * submit new requests for capture while the configure_streams() call is + * underway. 
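+ *
+ * For illustration only (not from the original header; 'device' is an
+ * assumed camera3_device_t pointer and the size/format are hypothetical),
+ * the framework-side structures for a single preview stream could look
+ * like:
+ *
+ *     camera3_stream_t preview = {
+ *         .stream_type = CAMERA3_STREAM_OUTPUT,
+ *         .width = 1280,
+ *         .height = 720,
+ *         .format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
+ *     };
+ *     camera3_stream_t *streams[1] = { &preview };
+ *     camera3_stream_configuration_t config = {
+ *         .num_streams = 1,
+ *         .streams = streams,
+ *     };
+ *     device->ops->configure_streams(device, &config);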
+ * + * Postconditions: + * + * The HAL device must configure itself to provide maximum possible output + * frame rate given the sizes and formats of the output streams, as + * documented in the camera device's static metadata. + * + * Performance requirements: + * + * This call is expected to be heavyweight and possibly take several hundred + * milliseconds to complete, since it may require resetting and + * reconfiguring the image sensor and the camera processing pipeline. + * Nevertheless, the HAL device should attempt to minimize the + * reconfiguration delay to minimize the user-visible pauses during + * application operational mode changes (such as switching from still + * capture to video recording). + * + * The HAL should return from this call in 500ms, and must return from this + * call in 1000ms. + * + * Return values: + * + * 0: On successful stream configuration + * + * -EINVAL: If the requested stream configuration is invalid. Some examples + * of invalid stream configurations include: + * + * - Including more than 1 input-capable stream (INPUT or + * BIDIRECTIONAL) + * + * - Not including any output-capable streams (OUTPUT or + * BIDIRECTIONAL) + * + * - Including streams with unsupported formats, or an unsupported + * size for that format. + * + * - Including too many output streams of a certain format. + * + * - Unsupported rotation configuration (only applies to + * devices with version >= CAMERA_DEVICE_API_VERSION_3_3) + * + * - Stream sizes/formats don't satisfy the + * camera3_stream_configuration_t->operation_mode requirements for non-NORMAL mode, + * or the requested operation_mode is not supported by the HAL. + * (only applies to devices with version >= CAMERA_DEVICE_API_VERSION_3_3) + * + * Note that the framework submitting an invalid stream + * configuration is not normal operation, since stream + * configurations are checked before configure. An invalid + * configuration means that a bug exists in the framework code, or + * there is a mismatch between the HAL's static metadata and the + * requirements on streams. + * + * -ENODEV: If there has been a fatal error and the device is no longer + * operational. Only close() can be called successfully by the + * framework after this error is returned. + */ + int (*configure_streams)(const struct camera3_device *, + camera3_stream_configuration_t *stream_list); + + /** + * register_stream_buffers: + * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * DEPRECATED. This will not be called and must be set to NULL. + * + * <= CAMERA_DEVICE_API_VERSION_3_1: + * + * Register buffers for a given stream with the HAL device. This method is + * called by the framework after a new stream is defined by + * configure_streams, and before buffers from that stream are included in a + * capture request. If the same stream is listed in a subsequent + * configure_streams() call, register_stream_buffers will _not_ be called + * again for that stream. + * + * The framework does not need to register buffers for all configured + * streams before it submits the first capture request. This allows quick + * startup for preview (or similar use cases) while other streams are still + * being allocated. + * + * This method is intended to allow the HAL device to map or otherwise + * prepare the buffers for later use. The buffers passed in will already be + * locked for use. At the end of the call, all the buffers must be ready to + * be returned to the stream. The buffer_set argument is only valid for the + * duration of this call. 
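+ *
+ * For reference, a sketch of the framework-built argument (the names and
+ * the buffer count are illustrative, not from the original header):
+ *
+ *     camera3_stream_buffer_set_t buffer_set = {
+ *         .stream = &preview,
+ *         .num_buffers = 4,
+ *         .buffers = gralloc_handles,  // buffer_handle_t **
+ *     };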
+ * + * If the stream format was set to HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, + * the camera HAL should inspect the passed-in buffers here to determine any + * platform-private pixel format information. + * + * Performance requirements: + * + * This should be a non-blocking call. The HAL should return from this call + * in 1ms, and must return from this call in 5ms. + * + * Return values: + * + * 0: On successful registration of the new stream buffers + * + * -EINVAL: If the stream_buffer_set does not refer to a valid active + * stream, or if the buffers array is invalid. + * + * -ENOMEM: If there was a failure in registering the buffers. The framework + * must consider all the stream buffers to be unregistered, and can + * try to register again later. + * + * -ENODEV: If there is a fatal error, and the device is no longer + * operational. Only close() can be called successfully by the + * framework after this error is returned. + */ + int (*register_stream_buffers)(const struct camera3_device *, + const camera3_stream_buffer_set_t *buffer_set); + + /********************************************************************** + * Request creation and submission + */ + + /** + * construct_default_request_settings: + * + * Create capture settings for standard camera use cases. + * + * The device must return a settings buffer that is configured to meet the + * requested use case, which must be one of the CAMERA3_TEMPLATE_* + * enums. All request control fields must be included. + * + * The HAL retains ownership of this structure, but the pointer to the + * structure must be valid until the device is closed. The framework and the + * HAL may not modify the buffer once it is returned by this call. The same + * buffer may be returned for subsequent calls for the same template, or for + * other templates. + * + * Performance requirements: + * + * This should be a non-blocking call. The HAL should return from this call + * in 1ms, and must return from this call in 5ms. + * + * Return values: + * + * Valid metadata: On successful creation of a default settings + * buffer. + * + * NULL: In case of a fatal error. After this is returned, only + * the close() method can be called successfully by the + * framework. + */ + const camera_metadata_t* (*construct_default_request_settings)( + const struct camera3_device *, + int type); + + /** + * process_capture_request: + * + * Send a new capture request to the HAL. The HAL should not return from + * this call until it is ready to accept the next request to process. Only + * one call to process_capture_request() will be made at a time by the + * framework, and the calls will all be from the same thread. The next call + * to process_capture_request() will be made as soon as a new request and + * its associated buffers are available. In a normal preview scenario, this + * means the function will be called again by the framework almost + * instantly. + * + * The actual request processing is asynchronous, with the results of + * capture being returned by the HAL through the process_capture_result() + * call. This call requires the result metadata to be available, but output + * buffers may simply provide sync fences to wait on. Multiple requests are + * expected to be in flight at once, to maintain full output frame rate. + * + * The framework retains ownership of the request structure. It is only + * guaranteed to be valid during this call. The HAL device must make copies + * of the information it needs to retain for the capture processing. 
The HAL + * is responsible for waiting on and closing the buffers' fences and + * returning the buffer handles to the framework. + * + * The HAL must write the file descriptor for the input buffer's release + * sync fence into input_buffer->release_fence, if input_buffer is not + * NULL. If the HAL returns -1 for the input buffer release sync fence, the + * framework is free to immediately reuse the input buffer. Otherwise, the + * framework will wait on the sync fence before refilling and reusing the + * input buffer. + * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * The input/output buffers provided by the framework in each request + * may be brand new (having never before been seen by the HAL). + * + * ------------------------------------------------------------------------ + * Performance considerations: + * + * Handling a new buffer should be extremely lightweight and there should be + * no frame rate degradation or frame jitter introduced. + * + * This call must return fast enough to ensure that the requested frame + * rate can be sustained, especially for streaming cases (post-processing + * quality settings set to FAST). The HAL should return from this call in 1 + * frame interval, and must return from this call in 4 frame intervals. + * + * Return values: + * + * 0: On a successful start to processing the capture request + * + * -EINVAL: If the input is malformed (the settings are NULL when not + * allowed, invalid physical camera settings, + * there are 0 output buffers, etc) and capture processing + * cannot start. Failures during request processing should be + * handled by calling camera3_callback_ops_t.notify(). In case of + * this error, the framework will retain responsibility for the + * stream buffers' fences and the buffer handles; the HAL should + * not close the fences or return these buffers with + * process_capture_result. + * + * -ENODEV: If the camera device has encountered a serious error. After this + * error is returned, only the close() method can be successfully + * called by the framework. + * + */ + int (*process_capture_request)(const struct camera3_device *, + camera3_capture_request_t *request); + + /********************************************************************** + * Miscellaneous methods + */ + + /** + * get_metadata_vendor_tag_ops: + * + * Get methods to query for vendor extension metadata tag information. The + * HAL should fill in all the vendor tag operation methods, or leave ops + * unchanged if no vendor tags are defined. + * + * The definition of vendor_tag_query_ops_t can be found in + * system/media/camera/include/system/camera_metadata.h. + * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * DEPRECATED. This function has been deprecated and should be set to + * NULL by the HAL. Please implement get_vendor_tag_ops in camera_common.h + * instead. + */ + void (*get_metadata_vendor_tag_ops)(const struct camera3_device*, + vendor_tag_query_ops_t* ops); + + /** + * dump: + * + * Print out debugging state for the camera device. This will be called by + * the framework when the camera service is asked for a debug dump, which + * happens when using the dumpsys tool, or when capturing a bugreport. + * + * The passed-in file descriptor can be used to write debugging text using + * dprintf() or write(). The text should be in ASCII encoding only. + * + * Performance requirements: + * + * This must be a non-blocking call. The HAL should return from this call + * in 1ms, and must return from this call in 10ms.
This call must avoid + * deadlocks, as it may be called at any point during camera operation. + * Any synchronization primitives used (such as mutex locks or semaphores) + * should be acquired with a timeout. + */ + void (*dump)(const struct camera3_device *, int fd); + + /** + * flush: + * + * Flush all currently in-process captures and all buffers in the pipeline + * on the given device. The framework will use this to dump all state as + * quickly as possible in order to prepare for a configure_streams() call. + * + * No buffers are required to be successfully returned, so every buffer + * held at the time of flush() (whether successfully filled or not) may be + * returned with CAMERA3_BUFFER_STATUS_ERROR. Note the HAL is still allowed + * to return valid (CAMERA3_BUFFER_STATUS_OK) buffers during this call, + * provided they are successfully filled. + * + * All requests currently in the HAL are expected to be returned as soon as + * possible. Not-in-process requests should return errors immediately. Any + * interruptible hardware blocks should be stopped, and any uninterruptible + * blocks should be waited on. + * + * flush() may be called concurrently to process_capture_request(), with the expectation that + * process_capture_request will return quickly and the request submitted in that + * process_capture_request call is treated like all other in-flight requests. Due to + * concurrency issues, it is possible that from the HAL's point of view, a + * process_capture_request() call may be started after flush has been invoked but has not + * returned yet. If such a call happens before flush() returns, the HAL should treat the new + * capture request like other in-flight pending requests (see #4 below). + * + * More specifically, the HAL must follow below requirements for various cases: + * + * 1. For captures that are too late for the HAL to cancel/stop, and will be + * completed normally by the HAL; i.e. the HAL can send shutter/notify and + * process_capture_result and buffers as normal. + * + * 2. For pending requests that have not done any processing, the HAL must call notify + * CAMERA3_MSG_ERROR_REQUEST, and return all the output buffers with + * process_capture_result in the error state (CAMERA3_BUFFER_STATUS_ERROR). + * The HAL must not place the release fence into an error state, instead, + * the release fences must be set to the acquire fences passed by the framework, + * or -1 if they have been waited on by the HAL already. This is also the path + * to follow for any captures for which the HAL already called notify() with + * CAMERA3_MSG_SHUTTER but won't be producing any metadata/valid buffers for. + * After CAMERA3_MSG_ERROR_REQUEST, for a given frame, only process_capture_results with + * buffers in CAMERA3_BUFFER_STATUS_ERROR are allowed. No further notifys or + * process_capture_result with non-null metadata is allowed. + * + * 3. For partially completed pending requests that will not have all the output + * buffers or perhaps missing metadata, the HAL should follow below: + * + * 3.1. Call notify with CAMERA3_MSG_ERROR_RESULT if some of the expected result + * metadata (i.e. one or more partial metadata) won't be available for the capture. + * + * 3.2. Call notify with CAMERA3_MSG_ERROR_BUFFER for every buffer that won't + * be produced for the capture. + * + * 3.3 Call notify with CAMERA3_MSG_SHUTTER with the capture timestamp before + * any buffers/metadata are returned with process_capture_result. 
+ * + * 3.4 For captures that will produce some results, the HAL must not call + * CAMERA3_MSG_ERROR_REQUEST, since that indicates complete failure. + * + * 3.5. Valid buffers/metadata should be passed to the framework as normal. + * + * 3.6. Failed buffers should be returned to the framework as described for case 2. + * But failed buffers do not have to follow the strict ordering valid buffers do, + * and may be out-of-order with respect to valid buffers. For example, if buffers + * A, B, C, D, E are sent, D and E are failed, then A, E, B, D, C is an acceptable + * return order. + * + * 3.7. For fully-missing metadata, calling CAMERA3_MSG_ERROR_RESULT is sufficient, no + * need to call process_capture_result with NULL metadata or equivalent. + * + * 4. If a flush() is invoked while a process_capture_request() invocation is active, that + * process call should return as soon as possible. In addition, if a process_capture_request() + * call is made after flush() has been invoked but before flush() has returned, the + * capture request provided by the late process_capture_request call should be treated like + * a pending request in case #2 above. + * + * flush() should only return when there are no more outstanding buffers or + * requests left in the HAL. The framework may call configure_streams (as + * the HAL state is now quiesced) or may issue new requests. + * + * Note that it's sufficient to only support fully-succeeded and fully-failed result cases. + * However, it is highly desirable to support the partial failure cases as well, as it + * could help improve the flush call overall performance. + * + * Performance requirements: + * + * The HAL should return from this call in 100ms, and must return from this + * call in 1000ms. And this call must not be blocked longer than pipeline + * latency (see S7 for definition). + * + * Version information: + * + * only available if device version >= CAMERA_DEVICE_API_VERSION_3_1. + * + * Return values: + * + * 0: On a successful flush of the camera HAL. + * + * -EINVAL: If the input is malformed (the device is not valid). + * + * -ENODEV: If the camera device has encountered a serious error. After this + * error is returned, only the close() method can be successfully + * called by the framework. + */ + int (*flush)(const struct camera3_device *); + + /* reserved for future use */ + void *reserved[8]; +} camera3_device_ops_t; + +/********************************************************************** + * + * Camera device definition + * + */ +typedef struct camera3_device { + /** + * common.version must equal CAMERA_DEVICE_API_VERSION_3_0 to identify this + * device as implementing version 3.0 of the camera device HAL. + * + * Performance requirements: + * + * Camera open (common.module->common.methods->open) should return in 200ms, and must return + * in 500ms. + * Camera close (common.close) should return in 200ms, and must return in 500ms. 
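+ *
+ * A minimal usage sketch (illustrative only; 'module' is an assumed
+ * camera_module_t pointer, and the id "0" and 'my_callback_ops' are
+ * hypothetical):
+ *
+ *     hw_device_t *dev = NULL;
+ *     if (module->common.methods->open(&module->common, "0", &dev) == 0) {
+ *         camera3_device_t *cam = (camera3_device_t *)dev;
+ *         cam->ops->initialize(cam, &my_callback_ops);
+ *         // configure streams and submit requests here
+ *         cam->common.close(&cam->common);
+ *     }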
+ * + */ + hw_device_t common; + camera3_device_ops_t *ops; + void *priv; +} camera3_device_t; + +__END_DECLS + +#endif /* #ifdef ANDROID_INCLUDE_CAMERA3_H */ diff --git a/spider-cam/libcamera/include/android/hardware/libhardware/include/hardware/camera_common.h b/spider-cam/libcamera/include/android/hardware/libhardware/include/hardware/camera_common.h new file mode 100644 index 0000000..5c9bc06 --- /dev/null +++ b/spider-cam/libcamera/include/android/hardware/libhardware/include/hardware/camera_common.h @@ -0,0 +1,919 @@ +/* SPDX-License-Identifier: Apache-2.0 */ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// FIXME: add well-defined names for cameras + +#ifndef ANDROID_INCLUDE_CAMERA_COMMON_H +#define ANDROID_INCLUDE_CAMERA_COMMON_H + +#include <stdint.h> +#include <stdbool.h> +#include <sys/cdefs.h> +#include <sys/types.h> +#include <cutils/native_handle.h> +#include <system/camera.h> +#include <system/camera_vendor_tags.h> +#include <hardware/hardware.h> +#include <hardware/gralloc.h> + +__BEGIN_DECLS + +/** + * The id of this module + */ +#define CAMERA_HARDWARE_MODULE_ID "camera" + +/** + * Module versioning information for the Camera hardware module, based on + * camera_module_t.common.module_api_version. The two most significant hex + * digits represent the major version, and the two least significant represent + * the minor version. + * + ******************************************************************************* + * Versions: 0.X - 1.X [CAMERA_MODULE_API_VERSION_1_0] + * + * Camera modules that report these version numbers implement the initial + * camera module HAL interface. All camera devices openable through this + * module support only version 1 of the camera device HAL. The device_version + * and static_camera_characteristics fields of camera_info are not valid. Only + * the android.hardware.Camera API can be supported by this module and its + * devices. + * + ******************************************************************************* + * Version: 2.0 [CAMERA_MODULE_API_VERSION_2_0] + * + * Camera modules that report this version number implement the second version + * of the camera module HAL interface. Camera devices openable through this + * module may support either version 1.0 or version 2.0 of the camera device + * HAL interface. The device_version field of camera_info is always valid; the + * static_camera_characteristics field of camera_info is valid if the + * device_version field is 2.0 or higher. + * + ******************************************************************************* + * Version: 2.1 [CAMERA_MODULE_API_VERSION_2_1] + * + * This camera module version adds support for asynchronous callbacks to the + * framework from the camera HAL module, which is used to notify the framework + * about changes to the camera module state. Modules that provide a valid + * set_callbacks() method must report at least this version number.
+ * + ******************************************************************************* + * Version: 2.2 [CAMERA_MODULE_API_VERSION_2_2] + * + * This camera module version adds vendor tag support from the module, and + * deprecates the old vendor_tag_query_ops that were previously only + * accessible with a device open. + * + ******************************************************************************* + * Version: 2.3 [CAMERA_MODULE_API_VERSION_2_3] + * + * This camera module version adds open legacy camera HAL device support. + * Framework can use it to open the camera device as lower device HAL version + * HAL device if the same device can support multiple device API versions. + * The standard hardware module open call (common.methods->open) continues + * to open the camera device with the latest supported version, which is + * also the version listed in camera_info_t.device_version. + * + ******************************************************************************* + * Version: 2.4 [CAMERA_MODULE_API_VERSION_2_4] + * + * This camera module version adds below API changes: + * + * 1. Torch mode support. The framework can use it to turn on torch mode for + * any camera device that has a flash unit, without opening a camera device. The + * camera device has a higher priority accessing the flash unit than the camera + * module; opening a camera device will turn off the torch if it had been enabled + * through the module interface. When there are any resource conflicts, such as + * open() is called to open a camera device, the camera HAL module must notify the + * framework through the torch mode status callback that the torch mode has been + * turned off. + * + * 2. External camera (e.g. USB hot-plug camera) support. The API updates specify that + * the camera static info is only available when camera is connected and ready to + * use for external hot-plug cameras. Calls to get static info will be invalid + * calls when camera status is not CAMERA_DEVICE_STATUS_PRESENT. The frameworks + * will only count on device status change callbacks to manage the available external + * camera list. + * + * 3. Camera arbitration hints. This module version adds support for explicitly + * indicating the number of camera devices that can be simultaneously opened and used. + * To specify valid combinations of devices, the resource_cost and conflicting_devices + * fields should always be set in the camera_info structure returned by the + * get_camera_info call. + * + * 4. Module initialization method. This will be called by the camera service + * right after the HAL module is loaded, to allow for one-time initialization + * of the HAL. It is called before any other module methods are invoked. 
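+ *
+ * For example (illustrative only; 'module' is an assumed camera_module_t
+ * pointer), framework-side code can gate these 2.4 features on the
+ * reported module version:
+ *
+ *     if (module->common.module_api_version >= CAMERA_MODULE_API_VERSION_2_4) {
+ *         // torch mode callbacks and resource costs are available
+ *     }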
+ */ + +/** + * Predefined macros for currently-defined version numbers + */ + +/** + * All module versions <= HARDWARE_MODULE_API_VERSION(1, 0xFF) must be treated + * as CAMERA_MODULE_API_VERSION_1_0 + */ +#define CAMERA_MODULE_API_VERSION_1_0 HARDWARE_MODULE_API_VERSION(1, 0) +#define CAMERA_MODULE_API_VERSION_2_0 HARDWARE_MODULE_API_VERSION(2, 0) +#define CAMERA_MODULE_API_VERSION_2_1 HARDWARE_MODULE_API_VERSION(2, 1) +#define CAMERA_MODULE_API_VERSION_2_2 HARDWARE_MODULE_API_VERSION(2, 2) +#define CAMERA_MODULE_API_VERSION_2_3 HARDWARE_MODULE_API_VERSION(2, 3) +#define CAMERA_MODULE_API_VERSION_2_4 HARDWARE_MODULE_API_VERSION(2, 4) + +#define CAMERA_MODULE_API_VERSION_CURRENT CAMERA_MODULE_API_VERSION_2_4 + +/** + * All device versions <= HARDWARE_DEVICE_API_VERSION(1, 0xFF) must be treated + * as CAMERA_DEVICE_API_VERSION_1_0 + */ +#define CAMERA_DEVICE_API_VERSION_1_0 HARDWARE_DEVICE_API_VERSION(1, 0) // DEPRECATED +#define CAMERA_DEVICE_API_VERSION_2_0 HARDWARE_DEVICE_API_VERSION(2, 0) // NO LONGER SUPPORTED +#define CAMERA_DEVICE_API_VERSION_2_1 HARDWARE_DEVICE_API_VERSION(2, 1) // NO LONGER SUPPORTED +#define CAMERA_DEVICE_API_VERSION_3_0 HARDWARE_DEVICE_API_VERSION(3, 0) // NO LONGER SUPPORTED +#define CAMERA_DEVICE_API_VERSION_3_1 HARDWARE_DEVICE_API_VERSION(3, 1) // NO LONGER SUPPORTED +#define CAMERA_DEVICE_API_VERSION_3_2 HARDWARE_DEVICE_API_VERSION(3, 2) +#define CAMERA_DEVICE_API_VERSION_3_3 HARDWARE_DEVICE_API_VERSION(3, 3) +#define CAMERA_DEVICE_API_VERSION_3_4 HARDWARE_DEVICE_API_VERSION(3, 4) +#define CAMERA_DEVICE_API_VERSION_3_5 HARDWARE_DEVICE_API_VERSION(3, 5) + +// Device version 3.5 is current, older HAL camera device versions are not +// recommended for new devices. +#define CAMERA_DEVICE_API_VERSION_CURRENT CAMERA_DEVICE_API_VERSION_3_5 + +/** + * Defined in /system/media/camera/include/system/camera_metadata.h + */ +typedef struct camera_metadata camera_metadata_t; + +typedef struct camera_info { + /** + * The direction that the camera faces to. See system/core/include/system/camera.h + * for camera facing definitions. + * + * Version information (based on camera_module_t.common.module_api_version): + * + * CAMERA_MODULE_API_VERSION_2_3 or lower: + * + * It should be CAMERA_FACING_BACK or CAMERA_FACING_FRONT. + * + * CAMERA_MODULE_API_VERSION_2_4 or higher: + * + * It should be CAMERA_FACING_BACK, CAMERA_FACING_FRONT or + * CAMERA_FACING_EXTERNAL. + */ + int facing; + + /** + * The orientation of the camera image. The value is the angle that the + * camera image needs to be rotated clockwise so it shows correctly on the + * display in its natural orientation. It should be 0, 90, 180, or 270. + * + * For example, suppose a device has a naturally tall screen. The + * back-facing camera sensor is mounted in landscape. You are looking at the + * screen. If the top side of the camera sensor is aligned with the right + * edge of the screen in natural orientation, the value should be 90. If the + * top side of a front-facing camera sensor is aligned with the right of the + * screen, the value should be 270. + * + * Version information (based on camera_module_t.common.module_api_version): + * + * CAMERA_MODULE_API_VERSION_2_3 or lower: + * + * Valid in all camera_module versions. + * + * CAMERA_MODULE_API_VERSION_2_4 or higher: + * + * Valid if camera facing is CAMERA_FACING_BACK or CAMERA_FACING_FRONT, + * not valid if camera facing is CAMERA_FACING_EXTERNAL. + */ + int orientation; + + /** + * The value of camera_device_t.common.version. 
+ * + * Version information (based on camera_module_t.common.module_api_version): + * + * CAMERA_MODULE_API_VERSION_1_0: + * + * Not valid. Can be assumed to be CAMERA_DEVICE_API_VERSION_1_0. Do + * not read this field. + * + * CAMERA_MODULE_API_VERSION_2_0 or higher: + * + * Always valid + * + */ + uint32_t device_version; + + /** + * The camera's fixed characteristics, which include all static camera metadata + * specified in system/media/camera/docs/docs.html. This should be a sorted metadata + * buffer, and may not be modified or freed by the caller. The pointer should remain + * valid for the lifetime of the camera module, and values in it may not + * change after it is returned by get_camera_info(). + * + * Version information (based on camera_module_t.common.module_api_version): + * + * CAMERA_MODULE_API_VERSION_1_0: + * + * Not valid. Extra characteristics are not available. Do not read this + * field. + * + * CAMERA_MODULE_API_VERSION_2_0 or higher: + * + * Valid if device_version >= CAMERA_DEVICE_API_VERSION_2_0. Do not read + * otherwise. + * + */ + const camera_metadata_t *static_camera_characteristics; + + /** + * The total resource "cost" of using this camera, represented as an integer + * value in the range [0, 100] where 100 represents total usage of the shared + * resource that is the limiting bottleneck of the camera subsystem. This may + * be a very rough estimate, and is used as a hint to the camera service to + * determine when to disallow multiple applications from simultaneously + * opening different cameras advertised by the camera service. + * + * The camera service must be able to simultaneously open and use any + * combination of camera devices exposed by the HAL where the sum of + * the resource costs of these cameras is <= 100. For determining cost, + * each camera device must be assumed to be configured and operating at + * the maximally resource-consuming framerate and stream size settings + * available in the configuration settings exposed for that device through + * the camera metadata. + * + * The camera service may still attempt to simultaneously open combinations + * of camera devices with a total resource cost > 100. This may succeed or + * fail. If this succeeds, combinations of configurations that are not + * supported due to resource constraints from having multiple open devices + * should fail during the configure calls. If the total resource cost is + * <= 100, open and configure should never fail for any stream configuration + * settings or other device capabilities that would normally succeed for a + * device when it is the only open camera device. + * + * This field will be used to determine whether background applications are + * allowed to use this camera device while other applications are using other + * camera devices. Note: multiple applications will never be allowed by the + * camera service to simultaneously open the same camera device. + * + * Example use cases: + * + * Ex. 1: Camera Device 0 = Back Camera + * Camera Device 1 = Front Camera + * - Using both camera devices causes a large framerate slowdown due to + * limited ISP bandwidth. + * + * Configuration: + * + * Camera Device 0 - resource_cost = 51 + * conflicting_devices = null + * Camera Device 1 - resource_cost = 51 + * conflicting_devices = null + * + * Result: + * + * Since the sum of the resource costs is > 100, if a higher-priority + * application has either device open, no lower-priority applications will be + * allowed by the camera service to open either device. 
If a lower-priority + * application is using a device that a higher-priority subsequently attempts + * to open, the lower-priority application will be forced to disconnect + * the device. + * + * If the highest-priority application chooses, it may still attempt to open + * both devices (since these devices are not listed as conflicting in the + * conflicting_devices fields), but usage of these devices may fail in the + * open or configure calls. + * + * Ex. 2: Camera Device 0 = Left Back Camera + * Camera Device 1 = Right Back Camera + * Camera Device 2 = Combined stereo camera using both right and left + * back camera sensors used by devices 0 and 1 + * Camera Device 3 = Front Camera + * - Due to hardware constraints, up to two cameras may be open at once. The + * combined stereo camera may never be used at the same time as either of the + * two back camera devices (device 0, 1), and typically requires too much + * bandwidth to use at the same time as the front camera (device 3). + * + * Configuration: + * + * Camera Device 0 - resource_cost = 50 + * conflicting_devices = { 2 } + * Camera Device 1 - resource_cost = 50 + * conflicting_devices = { 2 } + * Camera Device 2 - resource_cost = 100 + * conflicting_devices = { 0, 1 } + * Camera Device 3 - resource_cost = 50 + * conflicting_devices = null + * + * Result: + * + * Based on the conflicting_devices fields, the camera service guarantees that + * the following sets of open devices will never be allowed: { 1, 2 }, { 0, 2 }. + * + * Based on the resource_cost fields, if a high-priority foreground application + * is using camera device 0, a background application would be allowed to open + * camera device 1 or 3 (but would be forced to disconnect it again if the + * foreground application opened another device). + * + * The highest priority application may still attempt to simultaneously open + * devices 0, 2, and 3, but the HAL may fail in open or configure calls for + * this combination. + * + * Ex. 3: Camera Device 0 = Back Camera + * Camera Device 1 = Front Camera + * Camera Device 2 = Low-power Front Camera that uses the same + * sensor as device 1, but only exposes image stream + * resolutions that can be used in low-power mode + * - Using both front cameras (device 1, 2) at the same time is impossible due + * to a shared physical sensor. Using the back and "high-power" front camera + * (device 1) may be impossible for some stream configurations due to hardware + * limitations, but the "low-power" front camera option may always be used as + * it has special dedicated hardware. + * + * Configuration: + * + * Camera Device 0 - resource_cost = 100 + * conflicting_devices = null + * Camera Device 1 - resource_cost = 100 + * conflicting_devices = { 2 } + * Camera Device 2 - resource_cost = 0 + * conflicting_devices = { 1 } + * + * Result: + * + * Based on the conflicting_devices fields, the camera service guarantees that + * the following sets of open devices will never be allowed: { 1, 2 }. + * + * Based on the resource_cost fields, only the highest priority application + * may attempt to open both device 0 and 1 at the same time. If a higher-priority + * application is not using device 1 or 2, a low-priority background application + * may open device 2 (but will be forced to disconnect it if a higher-priority + * application subsequently opens device 1 or 2). + * + * Version information (based on camera_module_t.common.module_api_version): + * + * CAMERA_MODULE_API_VERSION_2_3 or lower: + * + * Not valid. Can be assumed to be 100.
Do not read this field. + * + * CAMERA_MODULE_API_VERSION_2_4 or higher: + * + * Always valid. + */ + int resource_cost; + + /** + * An array of camera device IDs represented as NULL-terminated strings + * indicating other devices that cannot be simultaneously opened while this + * camera device is in use. + * + * This field is intended to be used to indicate that this camera device + * is a composite of several other camera devices, or otherwise has + * hardware dependencies that prohibit simultaneous usage. If there are no + * dependencies, NULL may be returned in this field to indicate this. + * + * The camera service will never simultaneously open any of the devices + * in this list while this camera device is open. + * + * The strings pointed to in this field will not be cleaned up by the camera + * service, and must remain valid while this device is plugged in. + * + * Version information (based on camera_module_t.common.module_api_version): + * + * CAMERA_MODULE_API_VERSION_2_3 or lower: + * + * Not valid. Can be assumed to be NULL. Do not read this field. + * + * CAMERA_MODULE_API_VERSION_2_4 or higher: + * + * Always valid. + */ + char** conflicting_devices; + + /** + * The length of the array given in the conflicting_devices field. + * + * Version information (based on camera_module_t.common.module_api_version): + * + * CAMERA_MODULE_API_VERSION_2_3 or lower: + * + * Not valid. Can be assumed to be 0. Do not read this field. + * + * CAMERA_MODULE_API_VERSION_2_4 or higher: + * + * Always valid. + */ + size_t conflicting_devices_length; + +} camera_info_t; + +/** + * camera_device_status_t: + * + * The current status of the camera device, as provided by the HAL through the + * camera_module_callbacks.camera_device_status_change() call. + * + * At module load time, the framework will assume all camera devices are in the + * CAMERA_DEVICE_STATUS_PRESENT state. The HAL should invoke + * camera_module_callbacks::camera_device_status_change to inform the framework + * of any initially NOT_PRESENT devices. + * + * Allowed transitions: + * PRESENT -> NOT_PRESENT + * NOT_PRESENT -> ENUMERATING + * NOT_PRESENT -> PRESENT + * ENUMERATING -> PRESENT + * ENUMERATING -> NOT_PRESENT + */ +typedef enum camera_device_status { + /** + * The camera device is not currently connected, and opening it will return + * failure. + * + * Version information (based on camera_module_t.common.module_api_version): + * + * CAMERA_MODULE_API_VERSION_2_3 or lower: + * + * Calls to get_camera_info must still succeed, and provide the same information + * it would if the camera were connected. + * + * CAMERA_MODULE_API_VERSION_2_4: + * + * The camera device at this status must return -EINVAL for the get_camera_info call, + * as the device is not connected. + */ + CAMERA_DEVICE_STATUS_NOT_PRESENT = 0, + + /** + * The camera device is connected, and opening it will succeed. + * + * CAMERA_MODULE_API_VERSION_2_3 or lower: + * + * The information returned by get_camera_info cannot change due to this status + * change. By default, the framework will assume all devices are in this state. + * + * CAMERA_MODULE_API_VERSION_2_4: + * + * The information returned by get_camera_info will become valid after a device's + * status changes to this. By default, the framework will assume all devices are in + * this state. + */ + CAMERA_DEVICE_STATUS_PRESENT = 1, + + /** + * The camera device is connected, but it is undergoing an enumeration and + * so opening the device will return -EBUSY.
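As a concrete reading of the camera_info fields just defined, here is a hypothetical HAL-side sketch that fills in the struct for a stereo composite device like Example 2 above; the helper name, the device IDs and all values are invented for illustration.

/* Hypothetical: device "2" is a stereo composite that conflicts with the
 * two back sensors ("0" and "1") it is built from. */
static char *kStereoConflicts[] = { "0", "1" };

static void fill_stereo_info(struct camera_info *info)
{
    info->facing = CAMERA_FACING_BACK;          /* from system/camera.h */
    info->orientation = 90;                     /* illustrative mounting */
    info->device_version = CAMERA_DEVICE_API_VERSION_3_5;
    info->static_camera_characteristics = NULL; /* real HALs must fill this */
    info->resource_cost = 100;                  /* uses the whole ISP budget */
    info->conflicting_devices = kStereoConflicts;
    info->conflicting_devices_length = 2;
}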
+ * + * CAMERA_MODULE_API_VERSION_2_3 or lower: + * + * Calls to get_camera_info must still succeed, as if the camera were in the + * PRESENT status. + * + * CAMERA_MODULE_API_VERSION_2_4: + * + * The camera device at this status must return -EINVAL for the get_camera_info call, + * as the device is not ready. + */ + CAMERA_DEVICE_STATUS_ENUMERATING = 2, + +} camera_device_status_t; + +/** + * torch_mode_status_t: + * + * The current status of the torch mode, as provided by the HAL through the + * camera_module_callbacks.torch_mode_status_change() call. + * + * The torch mode status of a camera device is applicable only when the camera + * device is present. The framework will not call set_torch_mode() to turn on + * torch mode of a camera device if the camera device is not present. At module + * load time, the framework will assume torch modes are in the + * TORCH_MODE_STATUS_AVAILABLE_OFF state if the camera device is present and + * android.flash.info.available is reported as true via get_camera_info() call. + * + * The behaviors of the camera HAL module that the framework expects in the + * following situations when a camera device's status changes: + * 1. A previously-disconnected camera device becomes connected. + * After camera_module_callbacks::camera_device_status_change() is invoked + * to inform the framework that the camera device is present, the framework + * will assume the camera device's torch mode is in + * TORCH_MODE_STATUS_AVAILABLE_OFF state. The camera HAL module does not need + * to invoke camera_module_callbacks::torch_mode_status_change() unless the + * flash unit is unavailable for use by set_torch_mode(). + * + * 2. A previously-connected camera becomes disconnected. + * After camera_module_callbacks::camera_device_status_change() is invoked + * to inform the framework that the camera device is not present, the + * framework will not call set_torch_mode() for the disconnected camera + * device until its flash unit becomes available again. The camera HAL + * module does not need to invoke + * camera_module_callbacks::torch_mode_status_change() separately to inform + * that the flash unit has become unavailable. + * + * 3. open() is called to open a camera device. + * The camera HAL module must invoke + * camera_module_callbacks::torch_mode_status_change() for all flash units + * that have entered TORCH_MODE_STATUS_NOT_AVAILABLE state and can not be + * turned on by calling set_torch_mode() anymore due to this open() call. + * open() must not trigger TORCH_MODE_STATUS_AVAILABLE_OFF before + * TORCH_MODE_STATUS_NOT_AVAILABLE for all flash units that have become + * unavailable. + * + * 4. close() is called to close a camera device. + * The camera HAL module must invoke + * camera_module_callbacks::torch_mode_status_change() for all flash units + * that have entered TORCH_MODE_STATUS_AVAILABLE_OFF state and can be turned + * on by calling set_torch_mode() again because of enough resources freed + * up by this close() call. + * + * Note that the framework calling set_torch_mode() successfully must trigger + * TORCH_MODE_STATUS_AVAILABLE_OFF or TORCH_MODE_STATUS_AVAILABLE_ON callback + * for the given camera device. Additionally it must trigger + * TORCH_MODE_STATUS_AVAILABLE_OFF callbacks for other previously-on torch + * modes if HAL cannot keep multiple torch modes on simultaneously. + */ +typedef enum torch_mode_status { + + /** + * The flash unit is no longer available and the torch mode can not be + * turned on by calling set_torch_mode().
If the torch mode is on, it + * will be turned off by HAL before HAL calls torch_mode_status_change(). + */ + TORCH_MODE_STATUS_NOT_AVAILABLE = 0, + + /** + * A torch mode has become off and available to be turned on via + * set_torch_mode(). This may happen in the following + * cases: + * 1. After the resources to turn on the torch mode have become available. + * 2. After set_torch_mode() is called to turn off the torch mode. + * 3. After the framework turned on the torch mode of some other camera + * device and HAL had to turn off the torch modes of any camera devices + * that were previously on. + */ + TORCH_MODE_STATUS_AVAILABLE_OFF = 1, + + /** + * A torch mode has become on and available to be turned off via + * set_torch_mode(). This can happen only after set_torch_mode() is called + * to turn on the torch mode. + */ + TORCH_MODE_STATUS_AVAILABLE_ON = 2, + +} torch_mode_status_t; + +/** + * Callback functions for the camera HAL module to use to inform the framework + * of changes to the camera subsystem. + * + * Version information (based on camera_module_t.common.module_api_version): + * + * Each callback is called only by HAL modules implementing the indicated + * version or higher of the HAL module API interface. + * + * CAMERA_MODULE_API_VERSION_2_1: + * camera_device_status_change() + * + * CAMERA_MODULE_API_VERSION_2_4: + * torch_mode_status_change() + + */ +typedef struct camera_module_callbacks { + + /** + * camera_device_status_change: + * + * Callback to the framework to indicate that the state of a specific camera + * device has changed. At module load time, the framework will assume all + * camera devices are in the CAMERA_DEVICE_STATUS_PRESENT state. The HAL + * must call this method to inform the framework of any initially + * NOT_PRESENT devices. + * + * This callback is added for CAMERA_MODULE_API_VERSION_2_1. + * + * camera_module_callbacks: The instance of camera_module_callbacks_t passed + * to the module with set_callbacks. + * + * camera_id: The ID of the camera device that has a new status. + * + * new_status: The new status code, one of the camera_device_status_t enums, + * or a platform-specific status. + * + */ + void (*camera_device_status_change)(const struct camera_module_callbacks*, + int camera_id, + int new_status); + + /** + * torch_mode_status_change: + * + * Callback to the framework to indicate that the state of the torch mode + * of the flash unit associated with a specific camera device has changed. + * At module load time, the framework will assume the torch modes are in + * the TORCH_MODE_STATUS_AVAILABLE_OFF state if android.flash.info.available + * is reported as true via get_camera_info() call. + * + * This callback is added for CAMERA_MODULE_API_VERSION_2_4. + * + * camera_module_callbacks: The instance of camera_module_callbacks_t + * passed to the module with set_callbacks. + * + * camera_id: The ID of camera device whose flash unit has a new torch mode + * status. + * + * new_status: The new status code, one of the torch_mode_status_t enums. + */ + void (*torch_mode_status_change)(const struct camera_module_callbacks*, + const char* camera_id, + int new_status); + + +} camera_module_callbacks_t; + +typedef struct camera_module { + /** + * Common methods of the camera module. This *must* be the first member of + * camera_module as users of this structure will cast a hw_module_t to + * camera_module pointer in contexts where it's known the hw_module_t + * references a camera_module. 
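Rule 3 of the torch contract above (flash units made unusable by an open() call) is the easiest to get wrong; this hypothetical sketch shows the shape of the HAL-side notification, assuming the callback struct was stashed by set_callbacks().

/* Hypothetical: callbacks previously saved by the module's set_callbacks(). */
static const camera_module_callbacks_t *g_callbacks;

/* Called from the HAL's open() path for each flash unit this open() steals. */
static void notify_torch_stolen(const char *camera_id)
{
    if (g_callbacks && g_callbacks->torch_mode_status_change)
        g_callbacks->torch_mode_status_change(g_callbacks, camera_id,
                                              TORCH_MODE_STATUS_NOT_AVAILABLE);
}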
+ * + * The return values for common.methods->open for camera_module are: + * + * 0: On a successful open of the camera device. + * + * -ENODEV: The camera device cannot be opened due to an internal + * error. + * + * -EINVAL: The input arguments are invalid, i.e. the id is invalid, + * and/or the module is invalid. + * + * -EBUSY: The camera device was already opened for this camera id + * (by using this method or open_legacy), + * regardless of the device HAL version it was opened as. + * + * -EUSERS: The maximal number of camera devices that can be + * opened concurrently has already been opened, either by + * this method or the open_legacy method. + * + * All other return values from common.methods->open will be treated as + * -ENODEV. + */ + hw_module_t common; + + /** + * get_number_of_cameras: + * + * Returns the number of camera devices accessible through the camera + * module. The camera devices are numbered 0 through N-1, where N is the + * value returned by this call. The name of the camera device for open() is + * simply the number converted to a string. That is, "0" for camera ID 0, + * "1" for camera ID 1. + * + * Version information (based on camera_module_t.common.module_api_version): + * + * CAMERA_MODULE_API_VERSION_2_3 or lower: + * + * The value here must be static, and cannot change after the first call + * to this method. + * + * CAMERA_MODULE_API_VERSION_2_4 or higher: + * + * The value here must be static, and must count only built-in cameras, + * which have CAMERA_FACING_BACK or CAMERA_FACING_FRONT camera facing values + * (camera_info.facing). The HAL must not include the external cameras + * (camera_info.facing == CAMERA_FACING_EXTERNAL) into the return value + * of this call. Frameworks will use camera_device_status_change callback + * to manage the number of external cameras. + */ + int (*get_number_of_cameras)(void); + + /** + * get_camera_info: + * + * Return the static camera information for a given camera device. This + * information may not change for a camera device. + * + * Return values: + * + * 0: On a successful operation + * + * -ENODEV: The information cannot be provided due to an internal + * error. + * + * -EINVAL: The input arguments are invalid, i.e. the id is invalid, + * and/or the module is invalid. + * + * Version information (based on camera_module_t.common.module_api_version): + * + * CAMERA_MODULE_API_VERSION_2_4 or higher: + * + * When a camera is disconnected, its camera id becomes invalid. Calling this + * method with this invalid camera id will get -EINVAL and NULL camera + * static metadata (camera_info.static_camera_characteristics). + */ + int (*get_camera_info)(int camera_id, struct camera_info *info); + + /** + * set_callbacks: + * + * Provide callback function pointers to the HAL module to inform the framework + * of asynchronous camera module events. The framework will call this + * function once after initial camera HAL module load, after the + * get_number_of_cameras() method is called for the first time, and before + * any other calls to the module. + * + * Version information (based on camera_module_t.common.module_api_version): + * + * CAMERA_MODULE_API_VERSION_1_0, CAMERA_MODULE_API_VERSION_2_0: + * + * Not provided by HAL module. Framework may not call this function. + * + * CAMERA_MODULE_API_VERSION_2_1: + * + * Valid to be called by the framework. + * + * Return values: + * + * 0: On a successful operation + * + * -ENODEV: The operation cannot be completed due to an internal + * error.
+ * + * -EINVAL: The input arguments are invalid, i.e. the callbacks are + * null + */ + int (*set_callbacks)(const camera_module_callbacks_t *callbacks); + + /** + * get_vendor_tag_ops: + * + * Get methods to query for vendor extension metadata tag information. The + * HAL should fill in all the vendor tag operation methods, or leave ops + * unchanged if no vendor tags are defined. + * + * The vendor_tag_ops structure used here is defined in: + * system/media/camera/include/system/vendor_tags.h + * + * Version information (based on camera_module_t.common.module_api_version): + * + * CAMERA_MODULE_API_VERSION_1_x/2_0/2_1: + * Not provided by HAL module. Framework may not call this function. + * + * CAMERA_MODULE_API_VERSION_2_2: + * Valid to be called by the framework. + */ + void (*get_vendor_tag_ops)(vendor_tag_ops_t* ops); + + /** + * open_legacy: + * + * Open a specific legacy camera HAL device if multiple device HAL API + * versions are supported by this camera HAL module. For example, if the + * camera module supports both CAMERA_DEVICE_API_VERSION_1_0 and + * CAMERA_DEVICE_API_VERSION_3_2 device API for the same camera id, + * framework can call this function to open the camera device as a + * CAMERA_DEVICE_API_VERSION_1_0 device. + * + * This is an optional method. A camera HAL module does not need to support + * more than one device HAL version per device, and such modules may return + * -ENOSYS for all calls to this method. For all older HAL device API + * versions that are not supported, it may return -EOPNOTSUPP. When the above + * cases occur, the normal open() method (common.methods->open) will be + * used by the framework instead. + * + * Version information (based on camera_module_t.common.module_api_version): + * + * CAMERA_MODULE_API_VERSION_1_x/2_0/2_1/2_2: + * Not provided by HAL module. Framework will not call this function. + * + * CAMERA_MODULE_API_VERSION_2_3: + * Valid to be called by the framework. + * + * Return values: + * + * 0: On a successful open of the camera device. + * + * -ENOSYS: This method is not supported. + * + * -EOPNOTSUPP: The requested HAL version is not supported by this method. + * + * -EINVAL: The input arguments are invalid, i.e. the id is invalid, + * and/or the module is invalid. + * + * -EBUSY: The camera device was already opened for this camera id + * (by using this method or the common.methods->open method), + * regardless of the device HAL version it was opened as. + * + * -EUSERS: The maximal number of camera devices that can be + * opened concurrently has already been opened, either by + * this method or the common.methods->open method. + */ + int (*open_legacy)(const struct hw_module_t* module, const char* id, + uint32_t halVersion, struct hw_device_t** device); + + /** + * set_torch_mode: + * + * Turn on or off the torch mode of the flash unit associated with a given + * camera ID. If the operation is successful, the HAL must notify the + * framework of the torch state by invoking + * camera_module_callbacks.torch_mode_status_change() with the new state. + * + * The camera device has a higher priority accessing the flash unit. When + * there are any resource conflicts, such as when open() is called to open a + * camera device, the HAL module must notify the framework through + * camera_module_callbacks.torch_mode_status_change() that the + * torch mode has been turned off and the torch mode state has become + * TORCH_MODE_STATUS_NOT_AVAILABLE.
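The open_legacy() contract above implies a simple framework-side fallback pattern. The sketch below is illustrative only (open_camera is an invented helper), showing how -ENOSYS/-EOPNOTSUPP route the caller back to the normal open() entry point.

#include <errno.h>

static int open_camera(const camera_module_t *mod, const char *id,
                       uint32_t hal_version, struct hw_device_t **dev)
{
    if (mod->open_legacy) {
        int ret = mod->open_legacy(&mod->common, id, hal_version, dev);
        if (ret != -ENOSYS && ret != -EOPNOTSUPP)
            return ret; /* success, or a real error such as -EBUSY */
    }
    return mod->common.methods->open(&mod->common, id, dev);
}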
When resources to turn on torch mode + * become available again, the HAL module must notify the framework through + * camera_module_callbacks.torch_mode_status_change() that the torch mode + * state has become TORCH_MODE_STATUS_AVAILABLE_OFF for set_torch_mode() to + * be called. + * + * When the framework calls set_torch_mode() to turn on the torch mode of a + * flash unit, if the HAL cannot keep multiple torch modes on simultaneously, + * the HAL should turn off the torch mode that was turned on by + * a previous set_torch_mode() call and notify the framework that the torch + * mode state of that flash unit has become TORCH_MODE_STATUS_AVAILABLE_OFF. + * + * Version information (based on camera_module_t.common.module_api_version): + * + * CAMERA_MODULE_API_VERSION_1_x/2_0/2_1/2_2/2_3: + * Not provided by HAL module. Framework will not call this function. + * + * CAMERA_MODULE_API_VERSION_2_4: + * Valid to be called by the framework. + * + * Return values: + * + * 0: On a successful operation. + * + * -ENOSYS: The camera device does not support this operation. It is + * returned if and only if android.flash.info.available is + * false. + * + * -EBUSY: The camera device is already in use. + * + * -EUSERS: The resources needed to turn on the torch mode are not + * available, typically because other camera devices are + * holding the resources needed to use the flash unit. + * + * -EINVAL: camera_id is invalid. + * + */ + int (*set_torch_mode)(const char* camera_id, bool enabled); + + /** + * init: + * + * This method is called by the camera service before any other methods + * are invoked, right after the camera HAL library has been successfully + * loaded. It may be left as NULL by the HAL module, if no initialization + * is needed. + * + * It can be used by HAL implementations to perform initialization and + * other one-time operations. + * + * Version information (based on camera_module_t.common.module_api_version): + * + * CAMERA_MODULE_API_VERSION_1_x/2_0/2_1/2_2/2_3: + * Not provided by HAL module. Framework will not call this function. + * + * CAMERA_MODULE_API_VERSION_2_4: + * If not NULL, will always be called by the framework once after the HAL + * module is loaded, before any other HAL module method is called. + * + * Return values: + * + * 0: On a successful operation. + * + * -ENODEV: Initialization cannot be completed due to an internal + * error. The HAL must be assumed to be in a nonfunctional + * state. + * + */ + int (*init)(); + + /* reserved for future use */ + void* reserved[5]; +} camera_module_t; + +__END_DECLS + +#endif /* ANDROID_INCLUDE_CAMERA_COMMON_H */ diff --git a/spider-cam/libcamera/include/android/hardware/libhardware/include/hardware/fb.h b/spider-cam/libcamera/include/android/hardware/libhardware/include/hardware/fb.h new file mode 100644 index 0000000..2d9153a --- /dev/null +++ b/spider-cam/libcamera/include/android/hardware/libhardware/include/hardware/fb.h @@ -0,0 +1,174 @@ +/* SPDX-License-Identifier: Apache-2.0 */ +/* + * Copyright (C) 2008 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +#ifndef ANDROID_FB_INTERFACE_H +#define ANDROID_FB_INTERFACE_H + +#include <stdint.h> +#include <sys/cdefs.h> +#include <sys/types.h> + +#include <cutils/native_handle.h> + +#include <hardware/hardware.h> + +__BEGIN_DECLS + +#define GRALLOC_HARDWARE_FB0 "fb0" + +/*****************************************************************************/ + + +/*****************************************************************************/ + +typedef struct framebuffer_device_t { + /** + * Common methods of the framebuffer device. This *must* be the first member of + * framebuffer_device_t as users of this structure will cast a hw_device_t to + * framebuffer_device_t pointer in contexts where it's known the hw_device_t references a + * framebuffer_device_t. + */ + struct hw_device_t common; + + /* flags describing some attributes of the framebuffer */ + const uint32_t flags; + + /* dimensions of the framebuffer in pixels */ + const uint32_t width; + const uint32_t height; + + /* framebuffer stride in pixels */ + const int stride; + + /* framebuffer pixel format */ + const int format; + + /* resolution of the framebuffer's display panel in pixels per inch */ + const float xdpi; + const float ydpi; + + /* framebuffer's display panel refresh rate in frames per second */ + const float fps; + + /* min swap interval supported by this framebuffer */ + const int minSwapInterval; + + /* max swap interval supported by this framebuffer */ + const int maxSwapInterval; + + /* Number of framebuffers supported */ + const int numFramebuffers; + + int reserved[7]; + + /* + * requests a specific swap-interval (same definition as EGL) + * + * Returns 0 on success or -errno on error. + */ + int (*setSwapInterval)(struct framebuffer_device_t* window, + int interval); + + /* + * This hook is OPTIONAL. + * + * It is non-NULL if the framebuffer driver supports "update-on-demand" + * and the given rectangle is the area of the screen that gets + * updated during (*post)(). + * + * This is useful on devices that are able to DMA only a portion of + * the screen to the display panel, upon demand -- as opposed to + * constantly refreshing the panel 60 times per second, for instance. + * + * Only the area defined by this rectangle is guaranteed to be valid, that + * is, the driver is not allowed to post anything outside of this + * rectangle. + * + * The rectangle is evaluated during (*post)() and specifies which area + * of the buffer passed in (*post)() shall be posted. + * + * return -EINVAL if width or height <=0, or if left or top < 0 + */ + int (*setUpdateRect)(struct framebuffer_device_t* window, + int left, int top, int width, int height); + + /* + * Post to the display (display it on the screen) + * The buffer must have been allocated with the + * GRALLOC_USAGE_HW_FB usage flag. + * buffer must be the same width and height as the display and must NOT + * be locked. + * + * The buffer is shown during the next VSYNC. + * + * If the same buffer is posted again (possibly after some other buffer), + * post() will block until the first post is completed. + * + * Internally, post() is expected to lock the buffer so that a + * subsequent call to gralloc_module_t::(*lock)() with USAGE_RENDER or + * USAGE_*_WRITE will block until it is safe; that is typically once this + * buffer is shown and another buffer has been posted. + * + * Returns 0 on success or -errno on error.
+ */ + int (*post)(struct framebuffer_device_t* dev, buffer_handle_t buffer); + + + /* + * The (*compositionComplete)() method must be called after the + * compositor has finished issuing GL commands for client buffers. + */ + + int (*compositionComplete)(struct framebuffer_device_t* dev); + + /* + * This hook is OPTIONAL. + * + * If non-NULL it will be called by SurfaceFlinger on dumpsys + */ + void (*dump)(struct framebuffer_device_t* dev, char *buff, int buff_len); + + /* + * (*enableScreen)() is used to either blank (enable=0) or + * unblank (enable=1) the screen this framebuffer is attached to. + * + * Returns 0 on success or -errno on error. + */ + int (*enableScreen)(struct framebuffer_device_t* dev, int enable); + + void* reserved_proc[6]; + +} framebuffer_device_t; + + +/** convenience API for opening and closing a supported device */ + +static inline int framebuffer_open(const struct hw_module_t* module, + struct framebuffer_device_t** device) { + return module->methods->open(module, + GRALLOC_HARDWARE_FB0, TO_HW_DEVICE_T_OPEN(device)); +} + +static inline int framebuffer_close(struct framebuffer_device_t* device) { + return device->common.close(&device->common); +} + + +__END_DECLS + +#endif // ANDROID_FB_INTERFACE_H diff --git a/spider-cam/libcamera/include/android/hardware/libhardware/include/hardware/gralloc.h b/spider-cam/libcamera/include/android/hardware/libhardware/include/hardware/gralloc.h new file mode 100644 index 0000000..01a7672 --- /dev/null +++ b/spider-cam/libcamera/include/android/hardware/libhardware/include/hardware/gralloc.h @@ -0,0 +1,416 @@ +/* SPDX-License-Identifier: Apache-2.0 */ +/* + * Copyright (C) 2008 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +#ifndef ANDROID_GRALLOC_INTERFACE_H +#define ANDROID_GRALLOC_INTERFACE_H + +#include <system/graphics.h> +#include <hardware/hardware.h> + +#include <stdint.h> +#include <sys/cdefs.h> +#include <sys/types.h> + +#include <cutils/native_handle.h> + +#include <hardware/hardware.h> +#include <hardware/fb.h> + +__BEGIN_DECLS + +/** + * Module versioning information for the Gralloc hardware module, based on + * gralloc_module_t.common.module_api_version. + * + * Version History: + * + * GRALLOC_MODULE_API_VERSION_0_1: + * Initial Gralloc hardware module API. + * + * GRALLOC_MODULE_API_VERSION_0_2: + * Add support for flexible YCbCr format with (*lock_ycbcr)() method. + * + * GRALLOC_MODULE_API_VERSION_0_3: + * Add support for fence passing to/from lock/unlock.
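With fb.h complete, a minimal usage sketch may help; this is hypothetical glue code, not part of either header, and assumes the posted buffer was allocated with GRALLOC_USAGE_HW_FB.

#include <hardware/gralloc.h> /* pulls in fb.h and hardware.h */

/* Hypothetical: open the gralloc module's "fb0" device and post one frame. */
static int post_one_frame(buffer_handle_t frame)
{
    const struct hw_module_t *module = NULL;
    struct framebuffer_device_t *fb = NULL;
    int err = hw_get_module(GRALLOC_HARDWARE_MODULE_ID, &module);
    if (err)
        return err;
    err = framebuffer_open(module, &fb);
    if (err)
        return err;
    fb->setSwapInterval(fb, 1); /* one vsync per post */
    err = fb->post(fb, frame);
    framebuffer_close(fb);
    return err;
}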
+ */ + +#define GRALLOC_MODULE_API_VERSION_0_1 HARDWARE_MODULE_API_VERSION(0, 1) +#define GRALLOC_MODULE_API_VERSION_0_2 HARDWARE_MODULE_API_VERSION(0, 2) +#define GRALLOC_MODULE_API_VERSION_0_3 HARDWARE_MODULE_API_VERSION(0, 3) + +#define GRALLOC_DEVICE_API_VERSION_0_1 HARDWARE_DEVICE_API_VERSION(0, 1) + +/** + * The id of this module + */ +#define GRALLOC_HARDWARE_MODULE_ID "gralloc" + +/** + * Name of the graphics device to open + */ + +#define GRALLOC_HARDWARE_GPU0 "gpu0" + +enum { + /* buffer is never read in software */ + GRALLOC_USAGE_SW_READ_NEVER = 0x00000000U, + /* buffer is rarely read in software */ + GRALLOC_USAGE_SW_READ_RARELY = 0x00000002U, + /* buffer is often read in software */ + GRALLOC_USAGE_SW_READ_OFTEN = 0x00000003U, + /* mask for the software read values */ + GRALLOC_USAGE_SW_READ_MASK = 0x0000000FU, + + /* buffer is never written in software */ + GRALLOC_USAGE_SW_WRITE_NEVER = 0x00000000U, + /* buffer is rarely written in software */ + GRALLOC_USAGE_SW_WRITE_RARELY = 0x00000020U, + /* buffer is often written in software */ + GRALLOC_USAGE_SW_WRITE_OFTEN = 0x00000030U, + /* mask for the software write values */ + GRALLOC_USAGE_SW_WRITE_MASK = 0x000000F0U, + + /* buffer will be used as an OpenGL ES texture */ + GRALLOC_USAGE_HW_TEXTURE = 0x00000100U, + /* buffer will be used as an OpenGL ES render target */ + GRALLOC_USAGE_HW_RENDER = 0x00000200U, + /* buffer will be used by the 2D hardware blitter */ + GRALLOC_USAGE_HW_2D = 0x00000400U, + /* buffer will be used by the HWComposer HAL module */ + GRALLOC_USAGE_HW_COMPOSER = 0x00000800U, + /* buffer will be used with the framebuffer device */ + GRALLOC_USAGE_HW_FB = 0x00001000U, + + /* buffer should be displayed full-screen on an external display when + * possible */ + GRALLOC_USAGE_EXTERNAL_DISP = 0x00002000U, + + /* Must have a hardware-protected path to external display sink for + * this buffer. If a hardware-protected path is not available, then + * either don't composite only this buffer (preferred) to the + * external sink, or (less desirable) do not route the entire + * composition to the external sink. */ + GRALLOC_USAGE_PROTECTED = 0x00004000U, + + /* buffer may be used as a cursor */ + GRALLOC_USAGE_CURSOR = 0x00008000U, + + /* buffer will be used with the HW video encoder */ + GRALLOC_USAGE_HW_VIDEO_ENCODER = 0x00010000U, + /* buffer will be written by the HW camera pipeline */ + GRALLOC_USAGE_HW_CAMERA_WRITE = 0x00020000U, + /* buffer will be read by the HW camera pipeline */ + GRALLOC_USAGE_HW_CAMERA_READ = 0x00040000U, + /* buffer will be used as part of zero-shutter-lag queue */ + GRALLOC_USAGE_HW_CAMERA_ZSL = 0x00060000U, + /* mask for the camera access values */ + GRALLOC_USAGE_HW_CAMERA_MASK = 0x00060000U, + /* mask for the software usage bit-mask */ + GRALLOC_USAGE_HW_MASK = 0x00071F00U, + + /* buffer will be used as a RenderScript Allocation */ + GRALLOC_USAGE_RENDERSCRIPT = 0x00100000U, + + /* Set by the consumer to indicate to the producer that they may attach a + * buffer that they did not detach from the BufferQueue. Will be filtered + * out by GRALLOC_USAGE_ALLOC_MASK, so gralloc modules will not need to + * handle this flag. */ + GRALLOC_USAGE_FOREIGN_BUFFERS = 0x00200000U, + + /* Mask of all flags which could be passed to a gralloc module for buffer + * allocation. Any flags not in this mask do not need to be handled by + * gralloc modules. 
*/ + GRALLOC_USAGE_ALLOC_MASK = ~(GRALLOC_USAGE_FOREIGN_BUFFERS), + + /* implementation-specific private usage flags */ + GRALLOC_USAGE_PRIVATE_0 = 0x10000000U, + GRALLOC_USAGE_PRIVATE_1 = 0x20000000U, + GRALLOC_USAGE_PRIVATE_2 = 0x40000000U, + GRALLOC_USAGE_PRIVATE_3 = 0x80000000U, + GRALLOC_USAGE_PRIVATE_MASK = 0xF0000000U, +}; + +/*****************************************************************************/ + +/** + * Every hardware module must have a data structure named HAL_MODULE_INFO_SYM + * and the fields of this data structure must begin with hw_module_t + * followed by module specific information. + */ +typedef struct gralloc_module_t { + struct hw_module_t common; + + /* + * (*registerBuffer)() must be called before a buffer_handle_t that has not + * been created with (*alloc_device_t::alloc)() can be used. + * + * This is intended to be used with buffer_handle_t's that have been + * received in this process through IPC. + * + * This function checks that the handle is indeed a valid one and prepares + * it for use with (*lock)() and (*unlock)(). + * + * It is not necessary to call (*registerBuffer)() on a handle created + * with (*alloc_device_t::alloc)(). + * + * returns an error if this buffer_handle_t is not valid. + */ + int (*registerBuffer)(struct gralloc_module_t const* module, + buffer_handle_t handle); + + /* + * (*unregisterBuffer)() is called once this handle is no longer needed in + * this process. After this call, it is an error to call (*lock)(), + * (*unlock)(), or (*registerBuffer)(). + * + * This function doesn't close or free the handle itself; this is done + * by other means, usually through libcutils's native_handle_close() and + * native_handle_free(). + * + * It is an error to call (*unregisterBuffer)() on a buffer that wasn't + * explicitly registered first. + */ + int (*unregisterBuffer)(struct gralloc_module_t const* module, + buffer_handle_t handle); + + /* + * The (*lock)() method is called before a buffer is accessed for the + * specified usage. This call may block, for instance if the h/w needs + * to finish rendering or if CPU caches need to be synchronized. + * + * The caller promises to modify only pixels in the area specified + * by (l,t,w,h). + * + * The content of the buffer outside of the specified area is NOT modified + * by this call. + * + * If usage specifies GRALLOC_USAGE_SW_*, vaddr is filled with the address + * of the buffer in virtual memory. + * + * Note calling (*lock)() on HAL_PIXEL_FORMAT_YCbCr_*_888 buffers will fail + * and return -EINVAL. These buffers must be locked with (*lock_ycbcr)() + * instead. + * + * THREADING CONSIDERATIONS: + * + * It is legal for several different threads to lock a buffer for + * read access; none of the threads are blocked. + * + * However, locking a buffer simultaneously for write or read/write is + * undefined, but: + * - shall not result in termination of the process + * - shall not block the caller + * It is acceptable to return an error or to leave the buffer's content + * in an indeterminate state. + * + * If the buffer was created with a usage mask incompatible with the + * requested usage flags here, -EINVAL is returned. + * + */ + + int (*lock)(struct gralloc_module_t const* module, + buffer_handle_t handle, int usage, + int l, int t, int w, int h, + void** vaddr); + + + /* + * The (*unlock)() method must be called after all changes to the buffer + * are completed.
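The (*lock)()/(*unlock)() contract above maps to a short CPU-write pattern; the sketch below is illustrative only and assumes, purely for the example, that vaddr points at the buffer origin and the format uses 4 bytes per pixel.

#include <string.h>

static int clear_rect(const gralloc_module_t *gr, buffer_handle_t buf,
                      int l, int t, int w, int h, int stride_pixels)
{
    void *vaddr = NULL;
    int err = gr->lock(gr, buf, GRALLOC_USAGE_SW_WRITE_OFTEN,
                       l, t, w, h, &vaddr);
    if (err)
        return err;
    for (int y = 0; y < h; y++) /* touch only the locked rectangle */
        memset((char *)vaddr + ((size_t)(t + y) * stride_pixels + l) * 4,
               0, (size_t)w * 4);
    return gr->unlock(gr, buf);
}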
+ */ + + int (*unlock)(struct gralloc_module_t const* module, + buffer_handle_t handle); + + + /* reserved for future use */ + int (*perform)(struct gralloc_module_t const* module, + int operation, ... ); + + /* + * The (*lock_ycbcr)() method is like the (*lock)() method, with the + * difference that it fills a struct ycbcr with a description of the buffer + * layout, and zeroes out the reserved fields. + * + * If the buffer format is not compatible with a flexible YUV format (e.g. + * the buffer layout cannot be represented with the ycbcr struct), it + * will return -EINVAL. + * + * This method must work on buffers with HAL_PIXEL_FORMAT_YCbCr_*_888 + * if supported by the device, as well as with any other format that is + * requested by the multimedia codecs when they are configured with a + * flexible-YUV-compatible color-format with android native buffers. + * + * Note that this method may also be called on buffers of other formats, + * including non-YUV formats. + * + * Added in GRALLOC_MODULE_API_VERSION_0_2. + */ + + int (*lock_ycbcr)(struct gralloc_module_t const* module, + buffer_handle_t handle, int usage, + int l, int t, int w, int h, + struct android_ycbcr *ycbcr); + + /* + * The (*lockAsync)() method is like the (*lock)() method except + * that the buffer's sync fence object is passed into the lock + * call instead of requiring the caller to wait for completion. + * + * The gralloc implementation takes ownership of the fenceFd and + * is responsible for closing it when no longer needed. + * + * Added in GRALLOC_MODULE_API_VERSION_0_3. + */ + int (*lockAsync)(struct gralloc_module_t const* module, + buffer_handle_t handle, int usage, + int l, int t, int w, int h, + void** vaddr, int fenceFd); + + /* + * The (*unlockAsync)() method is like the (*unlock)() method + * except that a buffer sync fence object is returned from the + * lock call, representing the completion of any pending work + * performed by the gralloc implementation. + * + * The caller takes ownership of the fenceFd and is responsible + * for closing it when no longer needed. + * + * Added in GRALLOC_MODULE_API_VERSION_0_3. + */ + int (*unlockAsync)(struct gralloc_module_t const* module, + buffer_handle_t handle, int* fenceFd); + + /* + * The (*lockAsync_ycbcr)() method is like the (*lock_ycbcr)() + * method except that the buffer's sync fence object is passed + * into the lock call instead of requiring the caller to wait for + * completion. + * + * The gralloc implementation takes ownership of the fenceFd and + * is responsible for closing it when no longer needed. + * + * Added in GRALLOC_MODULE_API_VERSION_0_3. + */ + int (*lockAsync_ycbcr)(struct gralloc_module_t const* module, + buffer_handle_t handle, int usage, + int l, int t, int w, int h, + struct android_ycbcr *ycbcr, int fenceFd); + + /* reserved for future use */ + void* reserved_proc[3]; +} gralloc_module_t; + +/*****************************************************************************/ + +/** + * Every device data structure must begin with hw_device_t + * followed by module specific public methods and attributes. + */ + +typedef struct alloc_device_t { + struct hw_device_t common; + + /* + * (*alloc)() Allocates a buffer in graphic memory with the requested + * parameters and returns a buffer_handle_t and the stride in pixels to + * allow the implementation to satisfy hardware constraints on the width + * of a pixmap (eg: it may have to be multiple of 8 pixels). + * The CALLER TAKES OWNERSHIP of the buffer_handle_t. 
+ * + * If format is HAL_PIXEL_FORMAT_YCbCr_420_888, the returned stride must be + * 0, since the actual strides are available from the android_ycbcr + * structure. + * + * Returns 0 on success or -errno on error. + */ + + int (*alloc)(struct alloc_device_t* dev, + int w, int h, int format, int usage, + buffer_handle_t* handle, int* stride); + + /* + * (*free)() Frees a previously allocated buffer. + * Behavior is undefined if the buffer is still mapped in any process, + * but shall not result in termination of the program or security breaches + * (allowing a process to get access to another process' buffers). + * THIS FUNCTION TAKES OWNERSHIP of the buffer_handle_t which becomes + * invalid after the call. + * + * Returns 0 on success or -errno on error. + */ + int (*free)(struct alloc_device_t* dev, + buffer_handle_t handle); + + /* This hook is OPTIONAL. + * + * If non-NULL it will be called by SurfaceFlinger on dumpsys + */ + void (*dump)(struct alloc_device_t *dev, char *buff, int buff_len); + + void* reserved_proc[7]; +} alloc_device_t; + + +/** convenience API for opening and closing a supported device */ + +static inline int gralloc_open(const struct hw_module_t* module, + struct alloc_device_t** device) { + return module->methods->open(module, + GRALLOC_HARDWARE_GPU0, TO_HW_DEVICE_T_OPEN(device)); +} + +static inline int gralloc_close(struct alloc_device_t* device) { + return device->common.close(&device->common); +} + +/** + * map_usage_to_memtrack should be called after allocating a gralloc buffer. + * + * @param usage - the usage flags used when the alloc function is called. + * + * This function maps the gralloc usage flags to the appropriate memtrack bucket. + * GrallocHAL implementers and users should make an additional ION_IOCTL_TAG + * call using the memtrack tag returned by this function. This will help the + * in-kernel memtrack to categorize the memory allocated by different processes + * according to their usage. + * + */ +static inline const char* map_usage_to_memtrack(uint32_t usage) { + usage &= GRALLOC_USAGE_ALLOC_MASK; + + if ((usage & GRALLOC_USAGE_HW_CAMERA_WRITE) != 0) { + return "camera"; + } else if ((usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) != 0 || + (usage & GRALLOC_USAGE_EXTERNAL_DISP) != 0) { + return "video"; + } else if ((usage & GRALLOC_USAGE_HW_RENDER) != 0 || + (usage & GRALLOC_USAGE_HW_TEXTURE) != 0) { + return "gl"; + } else if ((usage & GRALLOC_USAGE_HW_CAMERA_READ) != 0) { + return "camera"; + } else if ((usage & GRALLOC_USAGE_SW_READ_MASK) != 0 || + (usage & GRALLOC_USAGE_SW_WRITE_MASK) != 0) { + return "cpu"; + } + return "graphics"; +} + +__END_DECLS + +#endif // ANDROID_GRALLOC_INTERFACE_H diff --git a/spider-cam/libcamera/include/android/hardware/libhardware/include/hardware/hardware.h b/spider-cam/libcamera/include/android/hardware/libhardware/include/hardware/hardware.h new file mode 100644 index 0000000..8919f12 --- /dev/null +++ b/spider-cam/libcamera/include/android/hardware/libhardware/include/hardware/hardware.h @@ -0,0 +1,245 @@ +/* SPDX-License-Identifier: Apache-2.0 */ +/* + * Copyright (C) 2008 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_INCLUDE_HARDWARE_HARDWARE_H +#define ANDROID_INCLUDE_HARDWARE_HARDWARE_H + +#include <stdint.h> +#include <sys/cdefs.h> + +#include <cutils/native_handle.h> +#include <system/graphics.h> + +__BEGIN_DECLS + +/* + * Value for the hw_module_t.tag field + */ + +#define MAKE_TAG_CONSTANT(A,B,C,D) (((A) << 24) | ((B) << 16) | ((C) << 8) | (D)) + +#define HARDWARE_MODULE_TAG MAKE_TAG_CONSTANT('H', 'W', 'M', 'T') +#define HARDWARE_DEVICE_TAG MAKE_TAG_CONSTANT('H', 'W', 'D', 'T') + +#define HARDWARE_MAKE_API_VERSION(maj,min) \ + ((((maj) & 0xff) << 8) | ((min) & 0xff)) + +#define HARDWARE_MAKE_API_VERSION_2(maj,min,hdr) \ + ((((maj) & 0xff) << 24) | (((min) & 0xff) << 16) | ((hdr) & 0xffff)) +#define HARDWARE_API_VERSION_2_MAJ_MIN_MASK 0xffff0000 +#define HARDWARE_API_VERSION_2_HEADER_MASK 0x0000ffff + + +/* + * The current HAL API version. + * + * All module implementations must set the hw_module_t.hal_api_version field + * to this value when declaring the module with HAL_MODULE_INFO_SYM. + * + * Note that previous implementations have always set this field to 0. + * Therefore, libhardware HAL API will always consider versions 0.0 and 1.0 + * to be 100% binary compatible. + * + */ +#define HARDWARE_HAL_API_VERSION HARDWARE_MAKE_API_VERSION(1, 0) + +/* + * Helper macros for module implementors. + * + * The derived modules should provide convenience macros for supported + * versions so that implementations can explicitly specify module/device + * versions at definition time. + * + * Use this macro to set the hw_module_t.module_api_version field. + */ +#define HARDWARE_MODULE_API_VERSION(maj,min) HARDWARE_MAKE_API_VERSION(maj,min) +#define HARDWARE_MODULE_API_VERSION_2(maj,min,hdr) HARDWARE_MAKE_API_VERSION_2(maj,min,hdr) + +/* + * Use this macro to set the hw_device_t.version field + */ +#define HARDWARE_DEVICE_API_VERSION(maj,min) HARDWARE_MAKE_API_VERSION(maj,min) +#define HARDWARE_DEVICE_API_VERSION_2(maj,min,hdr) HARDWARE_MAKE_API_VERSION_2(maj,min,hdr) + +struct hw_module_t; +struct hw_module_methods_t; +struct hw_device_t; + +/** + * Every hardware module must have a data structure named HAL_MODULE_INFO_SYM + * and the fields of this data structure must begin with hw_module_t + * followed by module specific information. + */ +typedef struct hw_module_t { + /** tag must be initialized to HARDWARE_MODULE_TAG */ + uint32_t tag; + + /** + * The API version of the implemented module. The module owner is + * responsible for updating the version when a module interface has + * changed. + * + * The derived modules such as gralloc and audio own and manage this field. + * The module user must interpret the version field to decide whether or + * not to inter-operate with the supplied module implementation. + * For example, SurfaceFlinger is responsible for making sure that + * it knows how to manage different versions of the gralloc-module API, + * and AudioFlinger must know how to do the same for audio-module API. + * + * The module API version should include a major and a minor component. + * For example, version 1.0 could be represented as 0x0100. This format + * implies that versions 0x0100-0x01ff are all API-compatible.
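The encoding above is easy to sanity-check: the major version lives in the high byte and the minor in the low byte, so 1.0 is 0x0100 and 2.4 is 0x0204. A throwaway check:

#include <assert.h>
#include <stdint.h>

static void version_encoding_examples(void)
{
    uint16_t v = HARDWARE_MAKE_API_VERSION(2, 4);
    assert(v == 0x0204);
    assert((v >> 8) == 2 && (v & 0xff) == 4);
    assert(HARDWARE_MAKE_API_VERSION(1, 0) == 0x0100);
}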
+ * + * In the future, libhardware will expose a hw_get_module_version() + * (or equivalent) function that will take minimum/maximum supported + * versions as arguments and would be able to reject modules with + * versions outside of the supplied range. + */ + uint16_t module_api_version; +#define version_major module_api_version + /** + * version_major/version_minor defines are supplied here for temporary + * source code compatibility. They will be removed in the next version. + * ALL clients must convert to the new version format. + */ + + /** + * The API version of the HAL module interface. This is meant to + * version the hw_module_t, hw_module_methods_t, and hw_device_t + * structures and definitions. + * + * The HAL interface owns this field. Module users/implementations + * must NOT rely on this value for version information. + * + * Presently, 0 is the only valid value. + */ + uint16_t hal_api_version; +#define version_minor hal_api_version + + /** Identifier of module */ + const char *id; + + /** Name of this module */ + const char *name; + + /** Author/owner/implementor of the module */ + const char *author; + + /** Modules methods */ + struct hw_module_methods_t* methods; + + /** module's dso */ + void* dso; + +#ifdef __LP64__ + uint64_t reserved[32-7]; +#else + /** padding to 128 bytes, reserved for future use */ + uint32_t reserved[32-7]; +#endif + +} hw_module_t; + +typedef struct hw_module_methods_t { + /** Open a specific device */ + int (*open)(const struct hw_module_t* module, const char* id, + struct hw_device_t** device); + +} hw_module_methods_t; + +/** + * Every device data structure must begin with hw_device_t + * followed by module specific public methods and attributes. + */ +typedef struct hw_device_t { + /** tag must be initialized to HARDWARE_DEVICE_TAG */ + uint32_t tag; + + /** + * Version of the module-specific device API. This value is used by + * the derived-module user to manage different device implementations. + * + * The module user is responsible for checking the module_api_version + * and device version fields to ensure that the user is capable of + * communicating with the specific module implementation. + * + * One module can support multiple devices with different versions. This + * can be useful when a device interface changes in an incompatible way + * but it is still necessary to support older implementations at the same + * time. One such example is the Camera 2.0 API. + * + * This field is interpreted by the module user and is ignored by the + * HAL interface itself. + */ + uint32_t version; + + /** reference to the module this device belongs to */ + struct hw_module_t* module; + + /** padding reserved for future use */ +#ifdef __LP64__ + uint64_t reserved[12]; +#else + uint32_t reserved[12]; +#endif + + /** Close this device */ + int (*close)(struct hw_device_t* device); + +} hw_device_t; + +#ifdef __cplusplus +#define TO_HW_DEVICE_T_OPEN(x) reinterpret_cast(x) +#else +#define TO_HW_DEVICE_T_OPEN(x) (struct hw_device_t**)(x) +#endif + +/** + * Name of the hal_module_info + */ +#define HAL_MODULE_INFO_SYM HMI + +/** + * Name of the hal_module_info as a string + */ +#define HAL_MODULE_INFO_SYM_AS_STR "HMI" + +/** + * Get the module info associated with a module by id. + * + * @return: 0 == success, <0 == error and *module == NULL + */ +int hw_get_module(const char *id, const struct hw_module_t **module); + +/** + * Get the module info associated with a module instance by class 'class_id' + * and instance 'inst'. 
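Pulling the pieces together, the module/device lifecycle defined above reduces to: look up the module, open a device via the module's methods table, and close it via the device's own hook. A hedged end-to-end sketch (both ids are caller-supplied):

static int open_then_close(const char *module_id, const char *device_id)
{
    const struct hw_module_t *module = NULL;
    struct hw_device_t *device = NULL;
    int err = hw_get_module(module_id, &module);
    if (err < 0)
        return err;
    err = module->methods->open(module, device_id, &device);
    if (err < 0)
        return err;
    return device->close(device);
}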
+ * + * Some module types necessitate multiple instances. For example, audio supports + * multiple concurrent interfaces and thus 'audio' is the module class + * and 'primary' or 'a2dp' are module interfaces. This implies that the files + * providing these modules would be named audio.primary..so and + * audio.a2dp..so + * + * @return: 0 == success, <0 == error and *module == NULL + */ +int hw_get_module_by_class(const char *class_id, const char *inst, + const struct hw_module_t **module); + +__END_DECLS + +#endif /* ANDROID_INCLUDE_HARDWARE_HARDWARE_H */ diff --git a/spider-cam/libcamera/include/android/meson.build b/spider-cam/libcamera/include/android/meson.build new file mode 100644 index 0000000..da2504f --- /dev/null +++ b/spider-cam/libcamera/include/android/meson.build @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: CC0-1.0 + +android_includes = ([ + include_directories('hardware/libhardware/include/'), + include_directories('metadata/'), + include_directories('system/core/include'), +]) diff --git a/spider-cam/libcamera/include/android/metadata/camera_metadata_hidden.h b/spider-cam/libcamera/include/android/metadata/camera_metadata_hidden.h new file mode 100644 index 0000000..91b11e4 --- /dev/null +++ b/spider-cam/libcamera/include/android/metadata/camera_metadata_hidden.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: Apache-2.0 */ +/* + * Copyright 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef SYSTEM_MEDIA_PRIVATE_INCLUDE_CAMERA_METADATA_HIDDEN_H +#define SYSTEM_MEDIA_PRIVATE_INCLUDE_CAMERA_METADATA_HIDDEN_H + +#include <system/camera_vendor_tags.h> + +/** + * Error codes returned by vendor tags ops operations. These are intended + * to be used by all framework code that uses the return values from the + * vendor operations object. + */ +#define VENDOR_SECTION_NAME_ERR NULL +#define VENDOR_TAG_NAME_ERR NULL +#define VENDOR_TAG_COUNT_ERR (-1) +#define VENDOR_TAG_TYPE_ERR (-1) + +#ifdef __cplusplus +extern "C" { +#endif +/** **These are private functions for use only by the camera framework.** **/ + +/** + * Set the global vendor tag operations object used to define vendor tag + * structure when parsing camera metadata with functions defined in + * system/media/camera/include/camera_metadata.h. + */ +ANDROID_API +int set_camera_metadata_vendor_ops(const vendor_tag_ops_t *query_ops); + +/** + * Set the global vendor tag cache operations object used to define vendor tag + * structure when parsing camera metadata with functions defined in + * system/media/camera/include/camera_metadata.h. + */ +ANDROID_API +int set_camera_metadata_vendor_cache_ops( + const struct vendor_tag_cache_ops *query_cache_ops); + +/** + * Set the vendor id for a particular metadata buffer. + */ +ANDROID_API +void set_camera_metadata_vendor_id(camera_metadata_t *meta, + metadata_vendor_id_t id); + +/** + * Retrieve the vendor id for a particular metadata buffer.
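For the private vendor-tag hooks declared above, framework initialization is a one-shot install; this hypothetical sketch simply records whether it succeeded.

static int g_vendor_tags_ready;

static void init_vendor_tags(const vendor_tag_ops_t *ops)
{
    /* Must run before any metadata is parsed with vendor tags. */
    g_vendor_tags_ready = (set_camera_metadata_vendor_ops(ops) == 0);
}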
+ */ +ANDROID_API +metadata_vendor_id_t get_camera_metadata_vendor_id( + const camera_metadata_t *meta); + +/** + * Retrieve the type of a tag. Returns -1 if no such tag is defined. + */ +ANDROID_API +int get_local_camera_metadata_tag_type_vendor_id(uint32_t tag, + metadata_vendor_id_t id); + +/** + * Retrieve the name of a tag. Returns NULL if no such tag is defined. + */ +ANDROID_API +const char *get_local_camera_metadata_tag_name_vendor_id(uint32_t tag, + metadata_vendor_id_t id); + +/** + * Retrieve the name of a tag section. Returns NULL if no such tag is defined. + */ +ANDROID_API +const char *get_local_camera_metadata_section_name_vendor_id(uint32_t tag, + metadata_vendor_id_t id); + +/** + * Retrieve the type of a tag. Returns -1 if no such tag is defined. + */ +ANDROID_API +int get_local_camera_metadata_tag_type_vendor_id(uint32_t tag, + metadata_vendor_id_t id); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* SYSTEM_MEDIA_PRIVATE_INCLUDE_CAMERA_METADATA_HIDDEN_H */ diff --git a/spider-cam/libcamera/include/android/metadata/system/camera_metadata.h b/spider-cam/libcamera/include/android/metadata/system/camera_metadata.h new file mode 100644 index 0000000..1672b09 --- /dev/null +++ b/spider-cam/libcamera/include/android/metadata/system/camera_metadata.h @@ -0,0 +1,581 @@ +/* SPDX-License-Identifier: Apache-2.0 */ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef SYSTEM_MEDIA_INCLUDE_ANDROID_CAMERA_METADATA_H +#define SYSTEM_MEDIA_INCLUDE_ANDROID_CAMERA_METADATA_H + +#include <string.h> +#include <stdint.h> +#include <stdio.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Tag hierarchy and enum definitions for camera_metadata_entry + * ============================================================================= + */ + +/** + * Main enum definitions are in a separate file to make it easy to + * maintain + */ +#include "camera_metadata_tags.h" + +/** + * Enum range for each top-level category + */ +ANDROID_API +extern unsigned int camera_metadata_section_bounds[ANDROID_SECTION_COUNT][2]; +ANDROID_API +extern const char *camera_metadata_section_names[ANDROID_SECTION_COUNT]; + +/** + * Type definitions for camera_metadata_entry + * ============================================================================= + */ +enum { + // Unsigned 8-bit integer (uint8_t) + TYPE_BYTE = 0, + // Signed 32-bit integer (int32_t) + TYPE_INT32 = 1, + // 32-bit float (float) + TYPE_FLOAT = 2, + // Signed 64-bit integer (int64_t) + TYPE_INT64 = 3, + // 64-bit float (double) + TYPE_DOUBLE = 4, + // A 64-bit fraction (camera_metadata_rational_t) + TYPE_RATIONAL = 5, + // Number of type fields + NUM_TYPES +}; + +typedef struct camera_metadata_rational { + int32_t numerator; + int32_t denominator; +} camera_metadata_rational_t; + +/** + * A reference to a metadata entry in a buffer. + * + * The data union pointers point to the real data in the buffer, and can be + * modified in-place if the count does not need to change.
The count is the + * number of entries in data of the entry's type, not a count of bytes. + */ +typedef struct camera_metadata_entry { + size_t index; + uint32_t tag; + uint8_t type; + size_t count; + union { + uint8_t *u8; + int32_t *i32; + float *f; + int64_t *i64; + double *d; + camera_metadata_rational_t *r; + } data; +} camera_metadata_entry_t; + +/** + * A read-only reference to a metadata entry in a buffer. Identical to + * camera_metadata_entry in layout + */ +typedef struct camera_metadata_ro_entry { + size_t index; + uint32_t tag; + uint8_t type; + size_t count; + union { + const uint8_t *u8; + const int32_t *i32; + const float *f; + const int64_t *i64; + const double *d; + const camera_metadata_rational_t *r; + } data; +} camera_metadata_ro_entry_t; + +/** + * Size in bytes of each entry type + */ +ANDROID_API +extern const size_t camera_metadata_type_size[NUM_TYPES]; + +/** + * Human-readable name of each entry type + */ +ANDROID_API +extern const char* camera_metadata_type_names[NUM_TYPES]; + +/** + * Main definitions for the metadata entry and array structures + * ============================================================================= + */ + +/** + * A packet of metadata. This is a list of metadata entries, each of which has + * an integer tag to identify its meaning, 'type' and 'count' field, and the + * data, which contains a 'count' number of entries of type 'type'. The packet + * has a fixed capacity for entries and for extra data. A new entry uses up one + * entry slot, and possibly some amount of data capacity; the function + * calculate_camera_metadata_entry_data_size() provides the amount of data + * capacity that would be used up by an entry. + * + * Entries are not sorted by default, and are not forced to be unique - multiple + * entries with the same tag are allowed. The packet will not dynamically resize + * when full. + * + * The packet is contiguous in memory, with size in bytes given by + * get_camera_metadata_size(). Therefore, it can be copied safely with memcpy() + * to a buffer of sufficient size. The copy_camera_metadata() function is + * intended for eliminating unused capacity in the destination packet. + */ +struct camera_metadata; +typedef struct camera_metadata camera_metadata_t; + +/** + * Functions for manipulating camera metadata + * ============================================================================= + * + * NOTE: Unless otherwise specified, functions that return type "int" + * return 0 on success, and non-0 value on error. + */ + +/** + * Allocate a new camera_metadata structure, with some initial space for entries + * and extra data. The entry_capacity is measured in entry counts, and + * data_capacity in bytes. The resulting structure is all contiguous in memory, + * and can be freed with free_camera_metadata(). + */ +ANDROID_API +camera_metadata_t *allocate_camera_metadata(size_t entry_capacity, + size_t data_capacity); + +/** + * Get the required alignment of a packet of camera metadata, which is the + * maximal alignment of the embedded camera_metadata, camera_metadata_buffer_entry, + * and camera_metadata_data. + */ +ANDROID_API +size_t get_camera_metadata_alignment(); + +/** + * Allocate a new camera_metadata structure of size src_size. Copy the data, + * ignoring alignment, and then attempt validation. If validation + * fails, free the memory and return NULL. Otherwise return the pointer. + * + * The resulting pointer can be freed with free_camera_metadata(). 
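+ *
+ * For example (an illustrative sketch; 'blob' and 'blob_size' are assumed
+ * to come from an untrusted source such as an IPC message):
+ *
+ *   camera_metadata_t *copy = allocate_copy_camera_metadata_checked(
+ *           (const camera_metadata_t *)blob, blob_size);
+ *   if (copy != NULL) {
+ *       // ... validated, safe to use 'copy' here ...
+ *       free_camera_metadata(copy);
+ *   }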
+ */ +ANDROID_API +camera_metadata_t *allocate_copy_camera_metadata_checked( + const camera_metadata_t *src, + size_t src_size); + +/** + * Place a camera metadata structure into an existing buffer. Returns NULL if + * the buffer is too small for the requested number of reserved entries and + * bytes of data. The entry_capacity is measured in entry counts, and + * data_capacity in bytes. If the buffer is larger than the required space, + * unused space will be left at the end. If successful, returns a pointer to the + * metadata header placed at the start of the buffer. It is the caller's + * responsibility to free the original buffer; do not call + * free_camera_metadata() with the returned pointer. + */ +ANDROID_API +camera_metadata_t *place_camera_metadata(void *dst, size_t dst_size, + size_t entry_capacity, + size_t data_capacity); + +/** + * Free a camera_metadata structure. Should only be used with structures + * allocated with allocate_camera_metadata(). + */ +ANDROID_API +void free_camera_metadata(camera_metadata_t *metadata); + +/** + * Calculate the buffer size needed for a metadata structure of entry_count + * metadata entries, needing a total of data_count bytes of extra data storage. + */ +ANDROID_API +size_t calculate_camera_metadata_size(size_t entry_count, + size_t data_count); + +/** + * Get current size of entire metadata structure in bytes, including reserved + * but unused space. + */ +ANDROID_API +size_t get_camera_metadata_size(const camera_metadata_t *metadata); + +/** + * Get size of entire metadata buffer in bytes, not including reserved but + * unused space. This is the amount of space needed by copy_camera_metadata for + * its dst buffer. + */ +ANDROID_API +size_t get_camera_metadata_compact_size(const camera_metadata_t *metadata); + +/** + * Get the current number of entries in the metadata packet. + * + * metadata packet must be valid, which can be checked before the call with + * validate_camera_metadata_structure(). + */ +ANDROID_API +size_t get_camera_metadata_entry_count(const camera_metadata_t *metadata); + +/** + * Get the maximum number of entries that could fit in the metadata packet. + */ +ANDROID_API +size_t get_camera_metadata_entry_capacity(const camera_metadata_t *metadata); + +/** + * Get the current count of bytes used for value storage in the metadata packet. + */ +ANDROID_API +size_t get_camera_metadata_data_count(const camera_metadata_t *metadata); + +/** + * Get the maximum count of bytes that could be used for value storage in the + * metadata packet. + */ +ANDROID_API +size_t get_camera_metadata_data_capacity(const camera_metadata_t *metadata); + +/** + * Copy a metadata structure to a memory buffer, compacting it along the + * way. That is, in the copied structure, entry_count == entry_capacity, and + * data_count == data_capacity. + * + * If dst_size > get_camera_metadata_compact_size(), the unused bytes are at the + * end of the buffer. If dst_size < get_camera_metadata_compact_size(), returns + * NULL. Otherwise returns a pointer to the metadata structure header placed at + * the start of dst. + * + * Since the buffer was not allocated by allocate_camera_metadata, the caller is + * responsible for freeing the underlying buffer when needed; do not call + * free_camera_metadata. 
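+ *
+ * For example (an illustrative sketch, with error handling omitted):
+ *
+ *   size_t size = get_camera_metadata_compact_size(src);
+ *   void *buf = malloc(size);
+ *   camera_metadata_t *dst = copy_camera_metadata(buf, size, src);
+ *   // ... use 'dst' ...
+ *   free(buf);  // not free_camera_metadata()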
+ */ +ANDROID_API +camera_metadata_t *copy_camera_metadata(void *dst, size_t dst_size, + const camera_metadata_t *src); + + +// Non-zero return values for validate_camera_metadata_structure +enum { + CAMERA_METADATA_VALIDATION_ERROR = 1, + CAMERA_METADATA_VALIDATION_SHIFTED = 2, +}; + +/** + * Validate that a metadata is structurally sane. That is, its internal + * state is such that we won't get buffer overflows or run into other + * 'impossible' issues when calling the other API functions. + * + * This is useful in particular after copying the binary metadata blob + * from an untrusted source, since passing this check means the data is at least + * consistent. + * + * The expected_size argument is optional. + * + * Returns 0: on success + * CAMERA_METADATA_VALIDATION_ERROR: on error + * CAMERA_METADATA_VALIDATION_SHIFTED: when the data is not properly aligned, but can be + * used as input of clone_camera_metadata and the returned metadata will be valid. + * + */ +ANDROID_API +int validate_camera_metadata_structure(const camera_metadata_t *metadata, + const size_t *expected_size); + +/** + * Append camera metadata in src to an existing metadata structure in dst. This + * does not resize the destination structure, so if it is too small, a non-zero + * value is returned. On success, 0 is returned. Appending onto a sorted + * structure results in a non-sorted combined structure. + */ +ANDROID_API +int append_camera_metadata(camera_metadata_t *dst, const camera_metadata_t *src); + +/** + * Clone an existing metadata buffer, compacting along the way. This is + * equivalent to allocating a new buffer of the minimum needed size, then + * appending the buffer to be cloned into the new buffer. The resulting buffer + * can be freed with free_camera_metadata(). Returns NULL if cloning failed. + */ +ANDROID_API +camera_metadata_t *clone_camera_metadata(const camera_metadata_t *src); + +/** + * Calculate the number of bytes of extra data a given metadata entry will take + * up. That is, if entry of 'type' with a payload of 'data_count' values is + * added, how much will the value returned by get_camera_metadata_data_count() + * be increased? This value may be zero, if no extra data storage is needed. + */ +ANDROID_API +size_t calculate_camera_metadata_entry_data_size(uint8_t type, + size_t data_count); + +/** + * Add a metadata entry to a metadata structure. Returns 0 if the addition + * succeeded. Returns a non-zero value if there is insufficient reserved space + * left to add the entry, or if the tag is unknown. data_count is the number of + * entries in the data array of the tag's type, not a count of + * bytes. Vendor-defined tags can not be added using this method, unless + * set_vendor_tag_query_ops() has been called first. Entries are always added to + * the end of the structure (highest index), so after addition, a + * previously-sorted array will be marked as unsorted. + * + * Returns 0 on success. A non-0 value is returned on error. + */ +ANDROID_API +int add_camera_metadata_entry(camera_metadata_t *dst, + uint32_t tag, + const void *data, + size_t data_count); + +/** + * Sort the metadata buffer for fast searching. If already marked as sorted, + * does nothing. Adding or appending entries to the buffer will place the buffer + * back into an unsorted state. + * + * Returns 0 on success. A non-0 value is returned on error. + */ +ANDROID_API +int sort_camera_metadata(camera_metadata_t *dst); + +/** + * Get metadata entry at position index in the metadata buffer. 
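+ *
+ * For example, to walk all entries (an illustrative sketch; 'm' is assumed
+ * to be a valid, validated metadata packet):
+ *
+ *   camera_metadata_entry_t e;
+ *   for (size_t i = 0; i < get_camera_metadata_entry_count(m); i++) {
+ *       if (get_camera_metadata_entry(m, i, &e) == 0) {
+ *           // e.tag, e.type, e.count and e.data describe entry i
+ *       }
+ *   }
+ *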
+ * Index must be less than entry count, which is returned by + * get_camera_metadata_entry_count(). + * + * src and index are inputs; the passed-in entry is updated with the details of + * the entry. The data pointer points to the real data in the buffer, and can be + * updated as long as the data count does not change. + * + * Returns 0 on success. A non-0 value is returned on error. + */ +ANDROID_API +int get_camera_metadata_entry(camera_metadata_t *src, + size_t index, + camera_metadata_entry_t *entry); + +/** + * Get metadata entry at position index, but disallow editing the data. + */ +ANDROID_API +int get_camera_metadata_ro_entry(const camera_metadata_t *src, + size_t index, + camera_metadata_ro_entry_t *entry); + +/** + * Find an entry with given tag value. If not found, returns -ENOENT. Otherwise, + * returns entry contents like get_camera_metadata_entry. + * + * If multiple entries with the same tag exist, does not have any guarantees on + * which is returned. To speed up searching for tags, sort the metadata + * structure first by calling sort_camera_metadata(). + */ +ANDROID_API +int find_camera_metadata_entry(camera_metadata_t *src, + uint32_t tag, + camera_metadata_entry_t *entry); + +/** + * Find an entry with given tag value, but disallow editing the data + */ +ANDROID_API +int find_camera_metadata_ro_entry(const camera_metadata_t *src, + uint32_t tag, + camera_metadata_ro_entry_t *entry); + +/** + * Delete an entry at given index. This is an expensive operation, since it + * requires repacking entries and possibly entry data. This also invalidates any + * existing camera_metadata_entry.data pointers to this buffer. Sorting is + * maintained. + */ +ANDROID_API +int delete_camera_metadata_entry(camera_metadata_t *dst, + size_t index); + +/** + * Updates a metadata entry with new data. If the data size is changing, may + * need to adjust the data array, making this an O(N) operation. If the data + * size is the same or still fits in the entry space, this is O(1). Maintains + * sorting, but invalidates camera_metadata_entry instances that point to the + * updated entry. If a non-NULL value is passed in to entry, the entry structure + * is updated to match the new buffer state. Returns a non-zero value if there + * is no room for the new data in the buffer. + */ +ANDROID_API +int update_camera_metadata_entry(camera_metadata_t *dst, + size_t index, + const void *data, + size_t data_count, + camera_metadata_entry_t *updated_entry); + +/** + * Retrieve human-readable name of section the tag is in. Returns NULL if + * no such tag is defined. Returns NULL for tags in the vendor section, unless + * set_vendor_tag_query_ops() has been used. + */ +ANDROID_API +const char *get_camera_metadata_section_name(uint32_t tag); + +/** + * Retrieve human-readable name of tag (not including section). Returns NULL if + * no such tag is defined. Returns NULL for tags in the vendor section, unless + * set_vendor_tag_query_ops() has been used. + */ +ANDROID_API +const char *get_camera_metadata_tag_name(uint32_t tag); + +/** + * Retrieve the type of a tag. Returns -1 if no such tag is defined. Returns -1 + * for tags in the vendor section, unless set_vendor_tag_query_ops() has been + * used. + */ +ANDROID_API +int get_camera_metadata_tag_type(uint32_t tag); + +/** + * Retrieve human-readable name of section the tag is in. Returns NULL if + * no such tag is defined. 
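+ *
+ * For example (an illustrative sketch; 'meta' is assumed valid, and the
+ * section name shown follows the android.* naming convention):
+ *
+ *   const char *name =
+ *       get_local_camera_metadata_section_name(ANDROID_CONTROL_AE_MODE, meta);
+ *   // name == "android.control"; NULL for an unknown tag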
+ */ +ANDROID_API +const char *get_local_camera_metadata_section_name(uint32_t tag, + const camera_metadata_t *meta); + +/** + * Retrieve human-readable name of tag (not including section). Returns NULL if + * no such tag is defined. + */ +ANDROID_API +const char *get_local_camera_metadata_tag_name(uint32_t tag, + const camera_metadata_t *meta); + +/** + * Retrieve the type of a tag. Returns -1 if no such tag is defined. + */ +ANDROID_API +int get_local_camera_metadata_tag_type(uint32_t tag, + const camera_metadata_t *meta); + +/** + * Set up vendor-specific tag query methods. These are needed to properly add + * entries with vendor-specified tags and to use the + * get_camera_metadata_section_name, _tag_name, and _tag_type methods with + * vendor tags. Returns 0 on success. + * + * **DEPRECATED** - Please use vendor_tag_ops defined in camera_vendor_tags.h + * instead. + */ +typedef struct vendor_tag_query_ops vendor_tag_query_ops_t; +struct vendor_tag_query_ops { + /** + * Get vendor section name for a vendor-specified entry tag. Only called for + * tags >= 0x80000000. The section name must start with the name of the + * vendor in the Java package style. For example, CameraZoom inc must prefix + * their sections with "com.camerazoom." Must return NULL if the tag is + * outside the bounds of vendor-defined sections. + */ + const char *(*get_camera_vendor_section_name)( + const vendor_tag_query_ops_t *v, + uint32_t tag); + /** + * Get tag name for a vendor-specified entry tag. Only called for tags >= + * 0x80000000. Must return NULL if the tag is outside the bounds of + * vendor-defined sections. + */ + const char *(*get_camera_vendor_tag_name)( + const vendor_tag_query_ops_t *v, + uint32_t tag); + /** + * Get tag type for a vendor-specified entry tag. Only called for tags >= + * 0x80000000. Must return -1 if the tag is outside the bounds of + * vendor-defined sections. + */ + int (*get_camera_vendor_tag_type)( + const vendor_tag_query_ops_t *v, + uint32_t tag); + /** + * Get the number of vendor tags supported on this platform. Used to + * calculate the size of buffer needed for holding the array of all tags + * returned by get_camera_vendor_tags(). + */ + int (*get_camera_vendor_tag_count)( + const vendor_tag_query_ops_t *v); + /** + * Fill an array with all the supported vendor tags on this platform. + * get_camera_vendor_tag_count() returns the number of tags supported, and + * tag_array should be allocated with enough space to hold all of the tags. + */ + void (*get_camera_vendor_tags)( + const vendor_tag_query_ops_t *v, + uint32_t *tag_array); +}; + +/** + * **DEPRECATED** - This should only be used by the camera framework. Camera + * metadata will transition to using vendor_tag_ops defined in + * camera_vendor_tags.h instead. + */ +ANDROID_API +int set_camera_metadata_vendor_tag_ops(const vendor_tag_query_ops_t *query_ops); + +/** + * Print fields in the metadata to the log. + * verbosity = 0: Only tag entry information + * verbosity = 1: Tag entry information plus at most 16 data values + * verbosity = 2: All information + */ +ANDROID_API +void dump_camera_metadata(const camera_metadata_t *metadata, + int fd, + int verbosity); + +/** + * Print fields in the metadata to the log; adds indentation parameter, which + * specifies the number of spaces to insert before each line of the dump + */ +ANDROID_API +void dump_indented_camera_metadata(const camera_metadata_t *metadata, + int fd, + int verbosity, + int indentation); + +/** + * Prints the specified tag value as a string. 
Only works for enum tags.
+ * Returns 0 on success, -1 on failure.
+ */
+ANDROID_API
+int camera_metadata_enum_snprint(uint32_t tag,
+        uint32_t value,
+        char *dst,
+        size_t size);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/spider-cam/libcamera/include/android/metadata/system/camera_metadata_tags.h b/spider-cam/libcamera/include/android/metadata/system/camera_metadata_tags.h
new file mode 100644
index 0000000..e0f2f5a
--- /dev/null
+++ b/spider-cam/libcamera/include/android/metadata/system/camera_metadata_tags.h
@@ -0,0 +1,1006 @@
+/* SPDX-License-Identifier: Apache-2.0 */
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * !! Do not include this file directly !!
+ *
+ * Include camera_metadata.h instead.
+ */
+
+/**
+ * ! Do not edit this file directly !
+ *
+ * Generated automatically from camera_metadata_tags.mako
+ */
+
+/** TODO: Nearly every enum in this file needs a description */
+
+/**
+ * Top level hierarchy definitions for camera metadata. *_INFO sections are for
+ * the static metadata that can be retrieved without opening the camera device.
+ * New sections must be added right before ANDROID_SECTION_COUNT to maintain
+ * existing enumerations.
+ */
+typedef enum camera_metadata_section {
+    ANDROID_COLOR_CORRECTION,
+    ANDROID_CONTROL,
+    ANDROID_DEMOSAIC,
+    ANDROID_EDGE,
+    ANDROID_FLASH,
+    ANDROID_FLASH_INFO,
+    ANDROID_HOT_PIXEL,
+    ANDROID_JPEG,
+    ANDROID_LENS,
+    ANDROID_LENS_INFO,
+    ANDROID_NOISE_REDUCTION,
+    ANDROID_QUIRKS,
+    ANDROID_REQUEST,
+    ANDROID_SCALER,
+    ANDROID_SENSOR,
+    ANDROID_SENSOR_INFO,
+    ANDROID_SHADING,
+    ANDROID_STATISTICS,
+    ANDROID_STATISTICS_INFO,
+    ANDROID_TONEMAP,
+    ANDROID_LED,
+    ANDROID_INFO,
+    ANDROID_BLACK_LEVEL,
+    ANDROID_SYNC,
+    ANDROID_REPROCESS,
+    ANDROID_DEPTH,
+    ANDROID_LOGICAL_MULTI_CAMERA,
+    ANDROID_DISTORTION_CORRECTION,
+    ANDROID_SECTION_COUNT,
+
+    VENDOR_SECTION = 0x8000
+} camera_metadata_section_t;
+
+/**
+ * Hierarchy positions in enum space.
All vendor extension tags must be + * defined with tag >= VENDOR_SECTION_START + */ +typedef enum camera_metadata_section_start { + ANDROID_COLOR_CORRECTION_START = ANDROID_COLOR_CORRECTION << 16, + ANDROID_CONTROL_START = ANDROID_CONTROL << 16, + ANDROID_DEMOSAIC_START = ANDROID_DEMOSAIC << 16, + ANDROID_EDGE_START = ANDROID_EDGE << 16, + ANDROID_FLASH_START = ANDROID_FLASH << 16, + ANDROID_FLASH_INFO_START = ANDROID_FLASH_INFO << 16, + ANDROID_HOT_PIXEL_START = ANDROID_HOT_PIXEL << 16, + ANDROID_JPEG_START = ANDROID_JPEG << 16, + ANDROID_LENS_START = ANDROID_LENS << 16, + ANDROID_LENS_INFO_START = ANDROID_LENS_INFO << 16, + ANDROID_NOISE_REDUCTION_START = ANDROID_NOISE_REDUCTION << 16, + ANDROID_QUIRKS_START = ANDROID_QUIRKS << 16, + ANDROID_REQUEST_START = ANDROID_REQUEST << 16, + ANDROID_SCALER_START = ANDROID_SCALER << 16, + ANDROID_SENSOR_START = ANDROID_SENSOR << 16, + ANDROID_SENSOR_INFO_START = ANDROID_SENSOR_INFO << 16, + ANDROID_SHADING_START = ANDROID_SHADING << 16, + ANDROID_STATISTICS_START = ANDROID_STATISTICS << 16, + ANDROID_STATISTICS_INFO_START = ANDROID_STATISTICS_INFO << 16, + ANDROID_TONEMAP_START = ANDROID_TONEMAP << 16, + ANDROID_LED_START = ANDROID_LED << 16, + ANDROID_INFO_START = ANDROID_INFO << 16, + ANDROID_BLACK_LEVEL_START = ANDROID_BLACK_LEVEL << 16, + ANDROID_SYNC_START = ANDROID_SYNC << 16, + ANDROID_REPROCESS_START = ANDROID_REPROCESS << 16, + ANDROID_DEPTH_START = ANDROID_DEPTH << 16, + ANDROID_LOGICAL_MULTI_CAMERA_START + = ANDROID_LOGICAL_MULTI_CAMERA + << 16, + ANDROID_DISTORTION_CORRECTION_START + = ANDROID_DISTORTION_CORRECTION + << 16, + VENDOR_SECTION_START = VENDOR_SECTION << 16 +} camera_metadata_section_start_t; + +/** + * Main enum for defining camera metadata tags. New entries must always go + * before the section _END tag to preserve existing enumeration values. 
In + * addition, the name and type of the tag needs to be added to + * system/media/camera/src/camera_metadata_tag_info.c + */ +typedef enum camera_metadata_tag { + ANDROID_COLOR_CORRECTION_MODE = // enum | public | HIDL v3.2 + ANDROID_COLOR_CORRECTION_START, + ANDROID_COLOR_CORRECTION_TRANSFORM, // rational[] | public | HIDL v3.2 + ANDROID_COLOR_CORRECTION_GAINS, // float[] | public | HIDL v3.2 + ANDROID_COLOR_CORRECTION_ABERRATION_MODE, // enum | public | HIDL v3.2 + ANDROID_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES, + // byte[] | public | HIDL v3.2 + ANDROID_COLOR_CORRECTION_END, + + ANDROID_CONTROL_AE_ANTIBANDING_MODE = // enum | public | HIDL v3.2 + ANDROID_CONTROL_START, + ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION, // int32 | public | HIDL v3.2 + ANDROID_CONTROL_AE_LOCK, // enum | public | HIDL v3.2 + ANDROID_CONTROL_AE_MODE, // enum | public | HIDL v3.2 + ANDROID_CONTROL_AE_REGIONS, // int32[] | public | HIDL v3.2 + ANDROID_CONTROL_AE_TARGET_FPS_RANGE, // int32[] | public | HIDL v3.2 + ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, // enum | public | HIDL v3.2 + ANDROID_CONTROL_AF_MODE, // enum | public | HIDL v3.2 + ANDROID_CONTROL_AF_REGIONS, // int32[] | public | HIDL v3.2 + ANDROID_CONTROL_AF_TRIGGER, // enum | public | HIDL v3.2 + ANDROID_CONTROL_AWB_LOCK, // enum | public | HIDL v3.2 + ANDROID_CONTROL_AWB_MODE, // enum | public | HIDL v3.2 + ANDROID_CONTROL_AWB_REGIONS, // int32[] | public | HIDL v3.2 + ANDROID_CONTROL_CAPTURE_INTENT, // enum | public | HIDL v3.2 + ANDROID_CONTROL_EFFECT_MODE, // enum | public | HIDL v3.2 + ANDROID_CONTROL_MODE, // enum | public | HIDL v3.2 + ANDROID_CONTROL_SCENE_MODE, // enum | public | HIDL v3.2 + ANDROID_CONTROL_VIDEO_STABILIZATION_MODE, // enum | public | HIDL v3.2 + ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES, // byte[] | public | HIDL v3.2 + ANDROID_CONTROL_AE_AVAILABLE_MODES, // byte[] | public | HIDL v3.2 + ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES, // int32[] | public | HIDL v3.2 + ANDROID_CONTROL_AE_COMPENSATION_RANGE, // int32[] | public | HIDL v3.2 + ANDROID_CONTROL_AE_COMPENSATION_STEP, // rational | public | HIDL v3.2 + ANDROID_CONTROL_AF_AVAILABLE_MODES, // byte[] | public | HIDL v3.2 + ANDROID_CONTROL_AVAILABLE_EFFECTS, // byte[] | public | HIDL v3.2 + ANDROID_CONTROL_AVAILABLE_SCENE_MODES, // byte[] | public | HIDL v3.2 + ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES, + // byte[] | public | HIDL v3.2 + ANDROID_CONTROL_AWB_AVAILABLE_MODES, // byte[] | public | HIDL v3.2 + ANDROID_CONTROL_MAX_REGIONS, // int32[] | ndk_public | HIDL v3.2 + ANDROID_CONTROL_SCENE_MODE_OVERRIDES, // byte[] | system | HIDL v3.2 + ANDROID_CONTROL_AE_PRECAPTURE_ID, // int32 | system | HIDL v3.2 + ANDROID_CONTROL_AE_STATE, // enum | public | HIDL v3.2 + ANDROID_CONTROL_AF_STATE, // enum | public | HIDL v3.2 + ANDROID_CONTROL_AF_TRIGGER_ID, // int32 | system | HIDL v3.2 + ANDROID_CONTROL_AWB_STATE, // enum | public | HIDL v3.2 + ANDROID_CONTROL_AVAILABLE_HIGH_SPEED_VIDEO_CONFIGURATIONS, + // int32[] | hidden | HIDL v3.2 + ANDROID_CONTROL_AE_LOCK_AVAILABLE, // enum | public | HIDL v3.2 + ANDROID_CONTROL_AWB_LOCK_AVAILABLE, // enum | public | HIDL v3.2 + ANDROID_CONTROL_AVAILABLE_MODES, // byte[] | public | HIDL v3.2 + ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST_RANGE, // int32[] | public | HIDL v3.2 + ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST, // int32 | public | HIDL v3.2 + ANDROID_CONTROL_ENABLE_ZSL, // enum | public | HIDL v3.2 + ANDROID_CONTROL_AF_SCENE_CHANGE, // enum | public | HIDL v3.3 + ANDROID_CONTROL_END, + + ANDROID_DEMOSAIC_MODE = // 
enum | system | HIDL v3.2 + ANDROID_DEMOSAIC_START, + ANDROID_DEMOSAIC_END, + + ANDROID_EDGE_MODE = // enum | public | HIDL v3.2 + ANDROID_EDGE_START, + ANDROID_EDGE_STRENGTH, // byte | system | HIDL v3.2 + ANDROID_EDGE_AVAILABLE_EDGE_MODES, // byte[] | public | HIDL v3.2 + ANDROID_EDGE_END, + + ANDROID_FLASH_FIRING_POWER = // byte | system | HIDL v3.2 + ANDROID_FLASH_START, + ANDROID_FLASH_FIRING_TIME, // int64 | system | HIDL v3.2 + ANDROID_FLASH_MODE, // enum | public | HIDL v3.2 + ANDROID_FLASH_COLOR_TEMPERATURE, // byte | system | HIDL v3.2 + ANDROID_FLASH_MAX_ENERGY, // byte | system | HIDL v3.2 + ANDROID_FLASH_STATE, // enum | public | HIDL v3.2 + ANDROID_FLASH_END, + + ANDROID_FLASH_INFO_AVAILABLE = // enum | public | HIDL v3.2 + ANDROID_FLASH_INFO_START, + ANDROID_FLASH_INFO_CHARGE_DURATION, // int64 | system | HIDL v3.2 + ANDROID_FLASH_INFO_END, + + ANDROID_HOT_PIXEL_MODE = // enum | public | HIDL v3.2 + ANDROID_HOT_PIXEL_START, + ANDROID_HOT_PIXEL_AVAILABLE_HOT_PIXEL_MODES, // byte[] | public | HIDL v3.2 + ANDROID_HOT_PIXEL_END, + + ANDROID_JPEG_GPS_COORDINATES = // double[] | ndk_public | HIDL v3.2 + ANDROID_JPEG_START, + ANDROID_JPEG_GPS_PROCESSING_METHOD, // byte | ndk_public | HIDL v3.2 + ANDROID_JPEG_GPS_TIMESTAMP, // int64 | ndk_public | HIDL v3.2 + ANDROID_JPEG_ORIENTATION, // int32 | public | HIDL v3.2 + ANDROID_JPEG_QUALITY, // byte | public | HIDL v3.2 + ANDROID_JPEG_THUMBNAIL_QUALITY, // byte | public | HIDL v3.2 + ANDROID_JPEG_THUMBNAIL_SIZE, // int32[] | public | HIDL v3.2 + ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES, // int32[] | public | HIDL v3.2 + ANDROID_JPEG_MAX_SIZE, // int32 | system | HIDL v3.2 + ANDROID_JPEG_SIZE, // int32 | system | HIDL v3.2 + ANDROID_JPEG_END, + + ANDROID_LENS_APERTURE = // float | public | HIDL v3.2 + ANDROID_LENS_START, + ANDROID_LENS_FILTER_DENSITY, // float | public | HIDL v3.2 + ANDROID_LENS_FOCAL_LENGTH, // float | public | HIDL v3.2 + ANDROID_LENS_FOCUS_DISTANCE, // float | public | HIDL v3.2 + ANDROID_LENS_OPTICAL_STABILIZATION_MODE, // enum | public | HIDL v3.2 + ANDROID_LENS_FACING, // enum | public | HIDL v3.2 + ANDROID_LENS_POSE_ROTATION, // float[] | public | HIDL v3.2 + ANDROID_LENS_POSE_TRANSLATION, // float[] | public | HIDL v3.2 + ANDROID_LENS_FOCUS_RANGE, // float[] | public | HIDL v3.2 + ANDROID_LENS_STATE, // enum | public | HIDL v3.2 + ANDROID_LENS_INTRINSIC_CALIBRATION, // float[] | public | HIDL v3.2 + ANDROID_LENS_RADIAL_DISTORTION, // float[] | public | HIDL v3.2 + ANDROID_LENS_POSE_REFERENCE, // enum | public | HIDL v3.3 + ANDROID_LENS_DISTORTION, // float[] | public | HIDL v3.3 + ANDROID_LENS_END, + + ANDROID_LENS_INFO_AVAILABLE_APERTURES = // float[] | public | HIDL v3.2 + ANDROID_LENS_INFO_START, + ANDROID_LENS_INFO_AVAILABLE_FILTER_DENSITIES, // float[] | public | HIDL v3.2 + ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS, // float[] | public | HIDL v3.2 + ANDROID_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION,// byte[] | public | HIDL v3.2 + ANDROID_LENS_INFO_HYPERFOCAL_DISTANCE, // float | public | HIDL v3.2 + ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE, // float | public | HIDL v3.2 + ANDROID_LENS_INFO_SHADING_MAP_SIZE, // int32[] | ndk_public | HIDL v3.2 + ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION, // enum | public | HIDL v3.2 + ANDROID_LENS_INFO_END, + + ANDROID_NOISE_REDUCTION_MODE = // enum | public | HIDL v3.2 + ANDROID_NOISE_REDUCTION_START, + ANDROID_NOISE_REDUCTION_STRENGTH, // byte | system | HIDL v3.2 + ANDROID_NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES, + // byte[] | public | HIDL v3.2 + 
ANDROID_NOISE_REDUCTION_END, + + ANDROID_QUIRKS_METERING_CROP_REGION = // byte | system | HIDL v3.2 + ANDROID_QUIRKS_START, + ANDROID_QUIRKS_TRIGGER_AF_WITH_AUTO, // byte | system | HIDL v3.2 + ANDROID_QUIRKS_USE_ZSL_FORMAT, // byte | system | HIDL v3.2 + ANDROID_QUIRKS_USE_PARTIAL_RESULT, // byte | hidden | HIDL v3.2 + ANDROID_QUIRKS_PARTIAL_RESULT, // enum | hidden | HIDL v3.2 + ANDROID_QUIRKS_END, + + ANDROID_REQUEST_FRAME_COUNT = // int32 | hidden | HIDL v3.2 + ANDROID_REQUEST_START, + ANDROID_REQUEST_ID, // int32 | hidden | HIDL v3.2 + ANDROID_REQUEST_INPUT_STREAMS, // int32[] | system | HIDL v3.2 + ANDROID_REQUEST_METADATA_MODE, // enum | system | HIDL v3.2 + ANDROID_REQUEST_OUTPUT_STREAMS, // int32[] | system | HIDL v3.2 + ANDROID_REQUEST_TYPE, // enum | system | HIDL v3.2 + ANDROID_REQUEST_MAX_NUM_OUTPUT_STREAMS, // int32[] | ndk_public | HIDL v3.2 + ANDROID_REQUEST_MAX_NUM_REPROCESS_STREAMS, // int32[] | system | HIDL v3.2 + ANDROID_REQUEST_MAX_NUM_INPUT_STREAMS, // int32 | java_public | HIDL v3.2 + ANDROID_REQUEST_PIPELINE_DEPTH, // byte | public | HIDL v3.2 + ANDROID_REQUEST_PIPELINE_MAX_DEPTH, // byte | public | HIDL v3.2 + ANDROID_REQUEST_PARTIAL_RESULT_COUNT, // int32 | public | HIDL v3.2 + ANDROID_REQUEST_AVAILABLE_CAPABILITIES, // enum[] | public | HIDL v3.2 + ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS, // int32[] | ndk_public | HIDL v3.2 + ANDROID_REQUEST_AVAILABLE_RESULT_KEYS, // int32[] | ndk_public | HIDL v3.2 + ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS, // int32[] | ndk_public | HIDL v3.2 + ANDROID_REQUEST_AVAILABLE_SESSION_KEYS, // int32[] | ndk_public | HIDL v3.3 + ANDROID_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS, + // int32[] | hidden | HIDL v3.3 + ANDROID_REQUEST_END, + + ANDROID_SCALER_CROP_REGION = // int32[] | public | HIDL v3.2 + ANDROID_SCALER_START, + ANDROID_SCALER_AVAILABLE_FORMATS, // enum[] | hidden | HIDL v3.2 + ANDROID_SCALER_AVAILABLE_JPEG_MIN_DURATIONS, // int64[] | hidden | HIDL v3.2 + ANDROID_SCALER_AVAILABLE_JPEG_SIZES, // int32[] | hidden | HIDL v3.2 + ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM, // float | public | HIDL v3.2 + ANDROID_SCALER_AVAILABLE_PROCESSED_MIN_DURATIONS, // int64[] | hidden | HIDL v3.2 + ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES, // int32[] | hidden | HIDL v3.2 + ANDROID_SCALER_AVAILABLE_RAW_MIN_DURATIONS, // int64[] | system | HIDL v3.2 + ANDROID_SCALER_AVAILABLE_RAW_SIZES, // int32[] | system | HIDL v3.2 + ANDROID_SCALER_AVAILABLE_INPUT_OUTPUT_FORMATS_MAP,// int32 | hidden | HIDL v3.2 + ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS, // enum[] | ndk_public | HIDL v3.2 + ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS, // int64[] | ndk_public | HIDL v3.2 + ANDROID_SCALER_AVAILABLE_STALL_DURATIONS, // int64[] | ndk_public | HIDL v3.2 + ANDROID_SCALER_CROPPING_TYPE, // enum | public | HIDL v3.2 + ANDROID_SCALER_END, + + ANDROID_SENSOR_EXPOSURE_TIME = // int64 | public | HIDL v3.2 + ANDROID_SENSOR_START, + ANDROID_SENSOR_FRAME_DURATION, // int64 | public | HIDL v3.2 + ANDROID_SENSOR_SENSITIVITY, // int32 | public | HIDL v3.2 + ANDROID_SENSOR_REFERENCE_ILLUMINANT1, // enum | public | HIDL v3.2 + ANDROID_SENSOR_REFERENCE_ILLUMINANT2, // byte | public | HIDL v3.2 + ANDROID_SENSOR_CALIBRATION_TRANSFORM1, // rational[] | public | HIDL v3.2 + ANDROID_SENSOR_CALIBRATION_TRANSFORM2, // rational[] | public | HIDL v3.2 + ANDROID_SENSOR_COLOR_TRANSFORM1, // rational[] | public | HIDL v3.2 + ANDROID_SENSOR_COLOR_TRANSFORM2, // rational[] | public | HIDL v3.2 + ANDROID_SENSOR_FORWARD_MATRIX1, // rational[] | public | HIDL v3.2 + 
ANDROID_SENSOR_FORWARD_MATRIX2, // rational[] | public | HIDL v3.2 + ANDROID_SENSOR_BASE_GAIN_FACTOR, // rational | system | HIDL v3.2 + ANDROID_SENSOR_BLACK_LEVEL_PATTERN, // int32[] | public | HIDL v3.2 + ANDROID_SENSOR_MAX_ANALOG_SENSITIVITY, // int32 | public | HIDL v3.2 + ANDROID_SENSOR_ORIENTATION, // int32 | public | HIDL v3.2 + ANDROID_SENSOR_PROFILE_HUE_SAT_MAP_DIMENSIONS, // int32[] | system | HIDL v3.2 + ANDROID_SENSOR_TIMESTAMP, // int64 | public | HIDL v3.2 + ANDROID_SENSOR_TEMPERATURE, // float | system | HIDL v3.2 + ANDROID_SENSOR_NEUTRAL_COLOR_POINT, // rational[] | public | HIDL v3.2 + ANDROID_SENSOR_NOISE_PROFILE, // double[] | public | HIDL v3.2 + ANDROID_SENSOR_PROFILE_HUE_SAT_MAP, // float[] | system | HIDL v3.2 + ANDROID_SENSOR_PROFILE_TONE_CURVE, // float[] | system | HIDL v3.2 + ANDROID_SENSOR_GREEN_SPLIT, // float | public | HIDL v3.2 + ANDROID_SENSOR_TEST_PATTERN_DATA, // int32[] | public | HIDL v3.2 + ANDROID_SENSOR_TEST_PATTERN_MODE, // enum | public | HIDL v3.2 + ANDROID_SENSOR_AVAILABLE_TEST_PATTERN_MODES, // int32[] | public | HIDL v3.2 + ANDROID_SENSOR_ROLLING_SHUTTER_SKEW, // int64 | public | HIDL v3.2 + ANDROID_SENSOR_OPTICAL_BLACK_REGIONS, // int32[] | public | HIDL v3.2 + ANDROID_SENSOR_DYNAMIC_BLACK_LEVEL, // float[] | public | HIDL v3.2 + ANDROID_SENSOR_DYNAMIC_WHITE_LEVEL, // int32 | public | HIDL v3.2 + ANDROID_SENSOR_OPAQUE_RAW_SIZE, // int32[] | system | HIDL v3.2 + ANDROID_SENSOR_END, + + ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE = // int32[] | public | HIDL v3.2 + ANDROID_SENSOR_INFO_START, + ANDROID_SENSOR_INFO_SENSITIVITY_RANGE, // int32[] | public | HIDL v3.2 + ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT, // enum | public | HIDL v3.2 + ANDROID_SENSOR_INFO_EXPOSURE_TIME_RANGE, // int64[] | public | HIDL v3.2 + ANDROID_SENSOR_INFO_MAX_FRAME_DURATION, // int64 | public | HIDL v3.2 + ANDROID_SENSOR_INFO_PHYSICAL_SIZE, // float[] | public | HIDL v3.2 + ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE, // int32[] | public | HIDL v3.2 + ANDROID_SENSOR_INFO_WHITE_LEVEL, // int32 | public | HIDL v3.2 + ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE, // enum | public | HIDL v3.2 + ANDROID_SENSOR_INFO_LENS_SHADING_APPLIED, // enum | public | HIDL v3.2 + ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE, + // int32[] | public | HIDL v3.2 + ANDROID_SENSOR_INFO_END, + + ANDROID_SHADING_MODE = // enum | public | HIDL v3.2 + ANDROID_SHADING_START, + ANDROID_SHADING_STRENGTH, // byte | system | HIDL v3.2 + ANDROID_SHADING_AVAILABLE_MODES, // byte[] | public | HIDL v3.2 + ANDROID_SHADING_END, + + ANDROID_STATISTICS_FACE_DETECT_MODE = // enum | public | HIDL v3.2 + ANDROID_STATISTICS_START, + ANDROID_STATISTICS_HISTOGRAM_MODE, // enum | system | HIDL v3.2 + ANDROID_STATISTICS_SHARPNESS_MAP_MODE, // enum | system | HIDL v3.2 + ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE, // enum | public | HIDL v3.2 + ANDROID_STATISTICS_FACE_IDS, // int32[] | ndk_public | HIDL v3.2 + ANDROID_STATISTICS_FACE_LANDMARKS, // int32[] | ndk_public | HIDL v3.2 + ANDROID_STATISTICS_FACE_RECTANGLES, // int32[] | ndk_public | HIDL v3.2 + ANDROID_STATISTICS_FACE_SCORES, // byte[] | ndk_public | HIDL v3.2 + ANDROID_STATISTICS_HISTOGRAM, // int32[] | system | HIDL v3.2 + ANDROID_STATISTICS_SHARPNESS_MAP, // int32[] | system | HIDL v3.2 + ANDROID_STATISTICS_LENS_SHADING_CORRECTION_MAP, // byte | java_public | HIDL v3.2 + ANDROID_STATISTICS_LENS_SHADING_MAP, // float[] | ndk_public | HIDL v3.2 + ANDROID_STATISTICS_PREDICTED_COLOR_GAINS, // float[] | hidden | HIDL v3.2 + ANDROID_STATISTICS_PREDICTED_COLOR_TRANSFORM, // 
rational[] | hidden | HIDL v3.2 + ANDROID_STATISTICS_SCENE_FLICKER, // enum | public | HIDL v3.2 + ANDROID_STATISTICS_HOT_PIXEL_MAP, // int32[] | public | HIDL v3.2 + ANDROID_STATISTICS_LENS_SHADING_MAP_MODE, // enum | public | HIDL v3.2 + ANDROID_STATISTICS_OIS_DATA_MODE, // enum | public | HIDL v3.3 + ANDROID_STATISTICS_OIS_TIMESTAMPS, // int64[] | ndk_public | HIDL v3.3 + ANDROID_STATISTICS_OIS_X_SHIFTS, // float[] | ndk_public | HIDL v3.3 + ANDROID_STATISTICS_OIS_Y_SHIFTS, // float[] | ndk_public | HIDL v3.3 + ANDROID_STATISTICS_END, + + ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES = + // byte[] | public | HIDL v3.2 + ANDROID_STATISTICS_INFO_START, + ANDROID_STATISTICS_INFO_HISTOGRAM_BUCKET_COUNT, // int32 | system | HIDL v3.2 + ANDROID_STATISTICS_INFO_MAX_FACE_COUNT, // int32 | public | HIDL v3.2 + ANDROID_STATISTICS_INFO_MAX_HISTOGRAM_COUNT, // int32 | system | HIDL v3.2 + ANDROID_STATISTICS_INFO_MAX_SHARPNESS_MAP_VALUE, // int32 | system | HIDL v3.2 + ANDROID_STATISTICS_INFO_SHARPNESS_MAP_SIZE, // int32[] | system | HIDL v3.2 + ANDROID_STATISTICS_INFO_AVAILABLE_HOT_PIXEL_MAP_MODES, + // byte[] | public | HIDL v3.2 + ANDROID_STATISTICS_INFO_AVAILABLE_LENS_SHADING_MAP_MODES, + // byte[] | public | HIDL v3.2 + ANDROID_STATISTICS_INFO_AVAILABLE_OIS_DATA_MODES, // byte[] | public | HIDL v3.3 + ANDROID_STATISTICS_INFO_END, + + ANDROID_TONEMAP_CURVE_BLUE = // float[] | ndk_public | HIDL v3.2 + ANDROID_TONEMAP_START, + ANDROID_TONEMAP_CURVE_GREEN, // float[] | ndk_public | HIDL v3.2 + ANDROID_TONEMAP_CURVE_RED, // float[] | ndk_public | HIDL v3.2 + ANDROID_TONEMAP_MODE, // enum | public | HIDL v3.2 + ANDROID_TONEMAP_MAX_CURVE_POINTS, // int32 | public | HIDL v3.2 + ANDROID_TONEMAP_AVAILABLE_TONE_MAP_MODES, // byte[] | public | HIDL v3.2 + ANDROID_TONEMAP_GAMMA, // float | public | HIDL v3.2 + ANDROID_TONEMAP_PRESET_CURVE, // enum | public | HIDL v3.2 + ANDROID_TONEMAP_END, + + ANDROID_LED_TRANSMIT = // enum | hidden | HIDL v3.2 + ANDROID_LED_START, + ANDROID_LED_AVAILABLE_LEDS, // enum[] | hidden | HIDL v3.2 + ANDROID_LED_END, + + ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL = // enum | public | HIDL v3.2 + ANDROID_INFO_START, + ANDROID_INFO_VERSION, // byte | public | HIDL v3.3 + ANDROID_INFO_END, + + ANDROID_BLACK_LEVEL_LOCK = // enum | public | HIDL v3.2 + ANDROID_BLACK_LEVEL_START, + ANDROID_BLACK_LEVEL_END, + + ANDROID_SYNC_FRAME_NUMBER = // enum | ndk_public | HIDL v3.2 + ANDROID_SYNC_START, + ANDROID_SYNC_MAX_LATENCY, // enum | public | HIDL v3.2 + ANDROID_SYNC_END, + + ANDROID_REPROCESS_EFFECTIVE_EXPOSURE_FACTOR = // float | java_public | HIDL v3.2 + ANDROID_REPROCESS_START, + ANDROID_REPROCESS_MAX_CAPTURE_STALL, // int32 | java_public | HIDL v3.2 + ANDROID_REPROCESS_END, + + ANDROID_DEPTH_MAX_DEPTH_SAMPLES = // int32 | system | HIDL v3.2 + ANDROID_DEPTH_START, + ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS, + // enum[] | ndk_public | HIDL v3.2 + ANDROID_DEPTH_AVAILABLE_DEPTH_MIN_FRAME_DURATIONS,// int64[] | ndk_public | HIDL v3.2 + ANDROID_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS, // int64[] | ndk_public | HIDL v3.2 + ANDROID_DEPTH_DEPTH_IS_EXCLUSIVE, // enum | public | HIDL v3.2 + ANDROID_DEPTH_END, + + ANDROID_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS = // byte[] | hidden | HIDL v3.3 + ANDROID_LOGICAL_MULTI_CAMERA_START, + ANDROID_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE, // enum | public | HIDL v3.3 + ANDROID_LOGICAL_MULTI_CAMERA_END, + + ANDROID_DISTORTION_CORRECTION_MODE = // enum | public | HIDL v3.3 + ANDROID_DISTORTION_CORRECTION_START, + 
ANDROID_DISTORTION_CORRECTION_AVAILABLE_MODES, // byte[] | public | HIDL v3.3 + ANDROID_DISTORTION_CORRECTION_END, + +} camera_metadata_tag_t; + +/** + * Enumeration definitions for the various entries that need them + */ + +// ANDROID_COLOR_CORRECTION_MODE +typedef enum camera_metadata_enum_android_color_correction_mode { + ANDROID_COLOR_CORRECTION_MODE_TRANSFORM_MATRIX , // HIDL v3.2 + ANDROID_COLOR_CORRECTION_MODE_FAST , // HIDL v3.2 + ANDROID_COLOR_CORRECTION_MODE_HIGH_QUALITY , // HIDL v3.2 +} camera_metadata_enum_android_color_correction_mode_t; + +// ANDROID_COLOR_CORRECTION_ABERRATION_MODE +typedef enum camera_metadata_enum_android_color_correction_aberration_mode { + ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF , // HIDL v3.2 + ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST , // HIDL v3.2 + ANDROID_COLOR_CORRECTION_ABERRATION_MODE_HIGH_QUALITY , // HIDL v3.2 +} camera_metadata_enum_android_color_correction_aberration_mode_t; + + +// ANDROID_CONTROL_AE_ANTIBANDING_MODE +typedef enum camera_metadata_enum_android_control_ae_antibanding_mode { + ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF , // HIDL v3.2 + ANDROID_CONTROL_AE_ANTIBANDING_MODE_50HZ , // HIDL v3.2 + ANDROID_CONTROL_AE_ANTIBANDING_MODE_60HZ , // HIDL v3.2 + ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO , // HIDL v3.2 +} camera_metadata_enum_android_control_ae_antibanding_mode_t; + +// ANDROID_CONTROL_AE_LOCK +typedef enum camera_metadata_enum_android_control_ae_lock { + ANDROID_CONTROL_AE_LOCK_OFF , // HIDL v3.2 + ANDROID_CONTROL_AE_LOCK_ON , // HIDL v3.2 +} camera_metadata_enum_android_control_ae_lock_t; + +// ANDROID_CONTROL_AE_MODE +typedef enum camera_metadata_enum_android_control_ae_mode { + ANDROID_CONTROL_AE_MODE_OFF , // HIDL v3.2 + ANDROID_CONTROL_AE_MODE_ON , // HIDL v3.2 + ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH , // HIDL v3.2 + ANDROID_CONTROL_AE_MODE_ON_ALWAYS_FLASH , // HIDL v3.2 + ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE , // HIDL v3.2 + ANDROID_CONTROL_AE_MODE_ON_EXTERNAL_FLASH , // HIDL v3.3 +} camera_metadata_enum_android_control_ae_mode_t; + +// ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER +typedef enum camera_metadata_enum_android_control_ae_precapture_trigger { + ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE , // HIDL v3.2 + ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_START , // HIDL v3.2 + ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_CANCEL , // HIDL v3.2 +} camera_metadata_enum_android_control_ae_precapture_trigger_t; + +// ANDROID_CONTROL_AF_MODE +typedef enum camera_metadata_enum_android_control_af_mode { + ANDROID_CONTROL_AF_MODE_OFF , // HIDL v3.2 + ANDROID_CONTROL_AF_MODE_AUTO , // HIDL v3.2 + ANDROID_CONTROL_AF_MODE_MACRO , // HIDL v3.2 + ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO , // HIDL v3.2 + ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE , // HIDL v3.2 + ANDROID_CONTROL_AF_MODE_EDOF , // HIDL v3.2 +} camera_metadata_enum_android_control_af_mode_t; + +// ANDROID_CONTROL_AF_TRIGGER +typedef enum camera_metadata_enum_android_control_af_trigger { + ANDROID_CONTROL_AF_TRIGGER_IDLE , // HIDL v3.2 + ANDROID_CONTROL_AF_TRIGGER_START , // HIDL v3.2 + ANDROID_CONTROL_AF_TRIGGER_CANCEL , // HIDL v3.2 +} camera_metadata_enum_android_control_af_trigger_t; + +// ANDROID_CONTROL_AWB_LOCK +typedef enum camera_metadata_enum_android_control_awb_lock { + ANDROID_CONTROL_AWB_LOCK_OFF , // HIDL v3.2 + ANDROID_CONTROL_AWB_LOCK_ON , // HIDL v3.2 +} camera_metadata_enum_android_control_awb_lock_t; + +// ANDROID_CONTROL_AWB_MODE +typedef enum camera_metadata_enum_android_control_awb_mode { + ANDROID_CONTROL_AWB_MODE_OFF , // HIDL v3.2 + 
ANDROID_CONTROL_AWB_MODE_AUTO , // HIDL v3.2 + ANDROID_CONTROL_AWB_MODE_INCANDESCENT , // HIDL v3.2 + ANDROID_CONTROL_AWB_MODE_FLUORESCENT , // HIDL v3.2 + ANDROID_CONTROL_AWB_MODE_WARM_FLUORESCENT , // HIDL v3.2 + ANDROID_CONTROL_AWB_MODE_DAYLIGHT , // HIDL v3.2 + ANDROID_CONTROL_AWB_MODE_CLOUDY_DAYLIGHT , // HIDL v3.2 + ANDROID_CONTROL_AWB_MODE_TWILIGHT , // HIDL v3.2 + ANDROID_CONTROL_AWB_MODE_SHADE , // HIDL v3.2 +} camera_metadata_enum_android_control_awb_mode_t; + +// ANDROID_CONTROL_CAPTURE_INTENT +typedef enum camera_metadata_enum_android_control_capture_intent { + ANDROID_CONTROL_CAPTURE_INTENT_CUSTOM , // HIDL v3.2 + ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW , // HIDL v3.2 + ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE , // HIDL v3.2 + ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_RECORD , // HIDL v3.2 + ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_SNAPSHOT , // HIDL v3.2 + ANDROID_CONTROL_CAPTURE_INTENT_ZERO_SHUTTER_LAG , // HIDL v3.2 + ANDROID_CONTROL_CAPTURE_INTENT_MANUAL , // HIDL v3.2 + ANDROID_CONTROL_CAPTURE_INTENT_MOTION_TRACKING , // HIDL v3.3 +} camera_metadata_enum_android_control_capture_intent_t; + +// ANDROID_CONTROL_EFFECT_MODE +typedef enum camera_metadata_enum_android_control_effect_mode { + ANDROID_CONTROL_EFFECT_MODE_OFF , // HIDL v3.2 + ANDROID_CONTROL_EFFECT_MODE_MONO , // HIDL v3.2 + ANDROID_CONTROL_EFFECT_MODE_NEGATIVE , // HIDL v3.2 + ANDROID_CONTROL_EFFECT_MODE_SOLARIZE , // HIDL v3.2 + ANDROID_CONTROL_EFFECT_MODE_SEPIA , // HIDL v3.2 + ANDROID_CONTROL_EFFECT_MODE_POSTERIZE , // HIDL v3.2 + ANDROID_CONTROL_EFFECT_MODE_WHITEBOARD , // HIDL v3.2 + ANDROID_CONTROL_EFFECT_MODE_BLACKBOARD , // HIDL v3.2 + ANDROID_CONTROL_EFFECT_MODE_AQUA , // HIDL v3.2 +} camera_metadata_enum_android_control_effect_mode_t; + +// ANDROID_CONTROL_MODE +typedef enum camera_metadata_enum_android_control_mode { + ANDROID_CONTROL_MODE_OFF , // HIDL v3.2 + ANDROID_CONTROL_MODE_AUTO , // HIDL v3.2 + ANDROID_CONTROL_MODE_USE_SCENE_MODE , // HIDL v3.2 + ANDROID_CONTROL_MODE_OFF_KEEP_STATE , // HIDL v3.2 +} camera_metadata_enum_android_control_mode_t; + +// ANDROID_CONTROL_SCENE_MODE +typedef enum camera_metadata_enum_android_control_scene_mode { + ANDROID_CONTROL_SCENE_MODE_DISABLED = 0, // HIDL v3.2 + ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY , // HIDL v3.2 + ANDROID_CONTROL_SCENE_MODE_ACTION , // HIDL v3.2 + ANDROID_CONTROL_SCENE_MODE_PORTRAIT , // HIDL v3.2 + ANDROID_CONTROL_SCENE_MODE_LANDSCAPE , // HIDL v3.2 + ANDROID_CONTROL_SCENE_MODE_NIGHT , // HIDL v3.2 + ANDROID_CONTROL_SCENE_MODE_NIGHT_PORTRAIT , // HIDL v3.2 + ANDROID_CONTROL_SCENE_MODE_THEATRE , // HIDL v3.2 + ANDROID_CONTROL_SCENE_MODE_BEACH , // HIDL v3.2 + ANDROID_CONTROL_SCENE_MODE_SNOW , // HIDL v3.2 + ANDROID_CONTROL_SCENE_MODE_SUNSET , // HIDL v3.2 + ANDROID_CONTROL_SCENE_MODE_STEADYPHOTO , // HIDL v3.2 + ANDROID_CONTROL_SCENE_MODE_FIREWORKS , // HIDL v3.2 + ANDROID_CONTROL_SCENE_MODE_SPORTS , // HIDL v3.2 + ANDROID_CONTROL_SCENE_MODE_PARTY , // HIDL v3.2 + ANDROID_CONTROL_SCENE_MODE_CANDLELIGHT , // HIDL v3.2 + ANDROID_CONTROL_SCENE_MODE_BARCODE , // HIDL v3.2 + ANDROID_CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO , // HIDL v3.2 + ANDROID_CONTROL_SCENE_MODE_HDR , // HIDL v3.2 + ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY_LOW_LIGHT , // HIDL v3.2 + ANDROID_CONTROL_SCENE_MODE_DEVICE_CUSTOM_START = 100, // HIDL v3.2 + ANDROID_CONTROL_SCENE_MODE_DEVICE_CUSTOM_END = 127, // HIDL v3.2 +} camera_metadata_enum_android_control_scene_mode_t; + +// ANDROID_CONTROL_VIDEO_STABILIZATION_MODE +typedef enum 
camera_metadata_enum_android_control_video_stabilization_mode { + ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF , // HIDL v3.2 + ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_ON , // HIDL v3.2 +} camera_metadata_enum_android_control_video_stabilization_mode_t; + +// ANDROID_CONTROL_AE_STATE +typedef enum camera_metadata_enum_android_control_ae_state { + ANDROID_CONTROL_AE_STATE_INACTIVE , // HIDL v3.2 + ANDROID_CONTROL_AE_STATE_SEARCHING , // HIDL v3.2 + ANDROID_CONTROL_AE_STATE_CONVERGED , // HIDL v3.2 + ANDROID_CONTROL_AE_STATE_LOCKED , // HIDL v3.2 + ANDROID_CONTROL_AE_STATE_FLASH_REQUIRED , // HIDL v3.2 + ANDROID_CONTROL_AE_STATE_PRECAPTURE , // HIDL v3.2 +} camera_metadata_enum_android_control_ae_state_t; + +// ANDROID_CONTROL_AF_STATE +typedef enum camera_metadata_enum_android_control_af_state { + ANDROID_CONTROL_AF_STATE_INACTIVE , // HIDL v3.2 + ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN , // HIDL v3.2 + ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED , // HIDL v3.2 + ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN , // HIDL v3.2 + ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED , // HIDL v3.2 + ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED , // HIDL v3.2 + ANDROID_CONTROL_AF_STATE_PASSIVE_UNFOCUSED , // HIDL v3.2 +} camera_metadata_enum_android_control_af_state_t; + +// ANDROID_CONTROL_AWB_STATE +typedef enum camera_metadata_enum_android_control_awb_state { + ANDROID_CONTROL_AWB_STATE_INACTIVE , // HIDL v3.2 + ANDROID_CONTROL_AWB_STATE_SEARCHING , // HIDL v3.2 + ANDROID_CONTROL_AWB_STATE_CONVERGED , // HIDL v3.2 + ANDROID_CONTROL_AWB_STATE_LOCKED , // HIDL v3.2 +} camera_metadata_enum_android_control_awb_state_t; + +// ANDROID_CONTROL_AE_LOCK_AVAILABLE +typedef enum camera_metadata_enum_android_control_ae_lock_available { + ANDROID_CONTROL_AE_LOCK_AVAILABLE_FALSE , // HIDL v3.2 + ANDROID_CONTROL_AE_LOCK_AVAILABLE_TRUE , // HIDL v3.2 +} camera_metadata_enum_android_control_ae_lock_available_t; + +// ANDROID_CONTROL_AWB_LOCK_AVAILABLE +typedef enum camera_metadata_enum_android_control_awb_lock_available { + ANDROID_CONTROL_AWB_LOCK_AVAILABLE_FALSE , // HIDL v3.2 + ANDROID_CONTROL_AWB_LOCK_AVAILABLE_TRUE , // HIDL v3.2 +} camera_metadata_enum_android_control_awb_lock_available_t; + +// ANDROID_CONTROL_ENABLE_ZSL +typedef enum camera_metadata_enum_android_control_enable_zsl { + ANDROID_CONTROL_ENABLE_ZSL_FALSE , // HIDL v3.2 + ANDROID_CONTROL_ENABLE_ZSL_TRUE , // HIDL v3.2 +} camera_metadata_enum_android_control_enable_zsl_t; + +// ANDROID_CONTROL_AF_SCENE_CHANGE +typedef enum camera_metadata_enum_android_control_af_scene_change { + ANDROID_CONTROL_AF_SCENE_CHANGE_NOT_DETECTED , // HIDL v3.3 + ANDROID_CONTROL_AF_SCENE_CHANGE_DETECTED , // HIDL v3.3 +} camera_metadata_enum_android_control_af_scene_change_t; + + +// ANDROID_DEMOSAIC_MODE +typedef enum camera_metadata_enum_android_demosaic_mode { + ANDROID_DEMOSAIC_MODE_FAST , // HIDL v3.2 + ANDROID_DEMOSAIC_MODE_HIGH_QUALITY , // HIDL v3.2 +} camera_metadata_enum_android_demosaic_mode_t; + + +// ANDROID_EDGE_MODE +typedef enum camera_metadata_enum_android_edge_mode { + ANDROID_EDGE_MODE_OFF , // HIDL v3.2 + ANDROID_EDGE_MODE_FAST , // HIDL v3.2 + ANDROID_EDGE_MODE_HIGH_QUALITY , // HIDL v3.2 + ANDROID_EDGE_MODE_ZERO_SHUTTER_LAG , // HIDL v3.2 +} camera_metadata_enum_android_edge_mode_t; + + +// ANDROID_FLASH_MODE +typedef enum camera_metadata_enum_android_flash_mode { + ANDROID_FLASH_MODE_OFF , // HIDL v3.2 + ANDROID_FLASH_MODE_SINGLE , // HIDL v3.2 + ANDROID_FLASH_MODE_TORCH , // HIDL v3.2 +} camera_metadata_enum_android_flash_mode_t; + +// ANDROID_FLASH_STATE +typedef enum 
camera_metadata_enum_android_flash_state { + ANDROID_FLASH_STATE_UNAVAILABLE , // HIDL v3.2 + ANDROID_FLASH_STATE_CHARGING , // HIDL v3.2 + ANDROID_FLASH_STATE_READY , // HIDL v3.2 + ANDROID_FLASH_STATE_FIRED , // HIDL v3.2 + ANDROID_FLASH_STATE_PARTIAL , // HIDL v3.2 +} camera_metadata_enum_android_flash_state_t; + + +// ANDROID_FLASH_INFO_AVAILABLE +typedef enum camera_metadata_enum_android_flash_info_available { + ANDROID_FLASH_INFO_AVAILABLE_FALSE , // HIDL v3.2 + ANDROID_FLASH_INFO_AVAILABLE_TRUE , // HIDL v3.2 +} camera_metadata_enum_android_flash_info_available_t; + + +// ANDROID_HOT_PIXEL_MODE +typedef enum camera_metadata_enum_android_hot_pixel_mode { + ANDROID_HOT_PIXEL_MODE_OFF , // HIDL v3.2 + ANDROID_HOT_PIXEL_MODE_FAST , // HIDL v3.2 + ANDROID_HOT_PIXEL_MODE_HIGH_QUALITY , // HIDL v3.2 +} camera_metadata_enum_android_hot_pixel_mode_t; + + + +// ANDROID_LENS_OPTICAL_STABILIZATION_MODE +typedef enum camera_metadata_enum_android_lens_optical_stabilization_mode { + ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF , // HIDL v3.2 + ANDROID_LENS_OPTICAL_STABILIZATION_MODE_ON , // HIDL v3.2 +} camera_metadata_enum_android_lens_optical_stabilization_mode_t; + +// ANDROID_LENS_FACING +typedef enum camera_metadata_enum_android_lens_facing { + ANDROID_LENS_FACING_FRONT , // HIDL v3.2 + ANDROID_LENS_FACING_BACK , // HIDL v3.2 + ANDROID_LENS_FACING_EXTERNAL , // HIDL v3.2 +} camera_metadata_enum_android_lens_facing_t; + +// ANDROID_LENS_STATE +typedef enum camera_metadata_enum_android_lens_state { + ANDROID_LENS_STATE_STATIONARY , // HIDL v3.2 + ANDROID_LENS_STATE_MOVING , // HIDL v3.2 +} camera_metadata_enum_android_lens_state_t; + +// ANDROID_LENS_POSE_REFERENCE +typedef enum camera_metadata_enum_android_lens_pose_reference { + ANDROID_LENS_POSE_REFERENCE_PRIMARY_CAMERA , // HIDL v3.3 + ANDROID_LENS_POSE_REFERENCE_GYROSCOPE , // HIDL v3.3 +} camera_metadata_enum_android_lens_pose_reference_t; + + +// ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION +typedef enum camera_metadata_enum_android_lens_info_focus_distance_calibration { + ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION_UNCALIBRATED , // HIDL v3.2 + ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION_APPROXIMATE , // HIDL v3.2 + ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION_CALIBRATED , // HIDL v3.2 +} camera_metadata_enum_android_lens_info_focus_distance_calibration_t; + + +// ANDROID_NOISE_REDUCTION_MODE +typedef enum camera_metadata_enum_android_noise_reduction_mode { + ANDROID_NOISE_REDUCTION_MODE_OFF , // HIDL v3.2 + ANDROID_NOISE_REDUCTION_MODE_FAST , // HIDL v3.2 + ANDROID_NOISE_REDUCTION_MODE_HIGH_QUALITY , // HIDL v3.2 + ANDROID_NOISE_REDUCTION_MODE_MINIMAL , // HIDL v3.2 + ANDROID_NOISE_REDUCTION_MODE_ZERO_SHUTTER_LAG , // HIDL v3.2 +} camera_metadata_enum_android_noise_reduction_mode_t; + + +// ANDROID_QUIRKS_PARTIAL_RESULT +typedef enum camera_metadata_enum_android_quirks_partial_result { + ANDROID_QUIRKS_PARTIAL_RESULT_FINAL , // HIDL v3.2 + ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL , // HIDL v3.2 +} camera_metadata_enum_android_quirks_partial_result_t; + + +// ANDROID_REQUEST_METADATA_MODE +typedef enum camera_metadata_enum_android_request_metadata_mode { + ANDROID_REQUEST_METADATA_MODE_NONE , // HIDL v3.2 + ANDROID_REQUEST_METADATA_MODE_FULL , // HIDL v3.2 +} camera_metadata_enum_android_request_metadata_mode_t; + +// ANDROID_REQUEST_TYPE +typedef enum camera_metadata_enum_android_request_type { + ANDROID_REQUEST_TYPE_CAPTURE , // HIDL v3.2 + ANDROID_REQUEST_TYPE_REPROCESS , // HIDL v3.2 +} camera_metadata_enum_android_request_type_t; + 
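+/*
+ * Illustrative note: a tag's section is encoded in its top 16 bits (see
+ * camera_metadata_section_start_t above), so for any tag:
+ *
+ *   uint32_t section = tag >> 16;          // e.g. ANDROID_CONTROL
+ *   int is_vendor = tag >= VENDOR_SECTION_START;
+ */
+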
+// ANDROID_REQUEST_AVAILABLE_CAPABILITIES +typedef enum camera_metadata_enum_android_request_available_capabilities { + ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE , // HIDL v3.2 + ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR , // HIDL v3.2 + ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_POST_PROCESSING , // HIDL v3.2 + ANDROID_REQUEST_AVAILABLE_CAPABILITIES_RAW , // HIDL v3.2 + ANDROID_REQUEST_AVAILABLE_CAPABILITIES_PRIVATE_REPROCESSING , // HIDL v3.2 + ANDROID_REQUEST_AVAILABLE_CAPABILITIES_READ_SENSOR_SETTINGS , // HIDL v3.2 + ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BURST_CAPTURE , // HIDL v3.2 + ANDROID_REQUEST_AVAILABLE_CAPABILITIES_YUV_REPROCESSING , // HIDL v3.2 + ANDROID_REQUEST_AVAILABLE_CAPABILITIES_DEPTH_OUTPUT , // HIDL v3.2 + ANDROID_REQUEST_AVAILABLE_CAPABILITIES_CONSTRAINED_HIGH_SPEED_VIDEO + , // HIDL v3.2 + ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MOTION_TRACKING , // HIDL v3.3 + ANDROID_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA , // HIDL v3.3 + ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MONOCHROME , // HIDL v3.3 +} camera_metadata_enum_android_request_available_capabilities_t; + + +// ANDROID_SCALER_AVAILABLE_FORMATS +typedef enum camera_metadata_enum_android_scaler_available_formats { + ANDROID_SCALER_AVAILABLE_FORMATS_RAW16 = 0x20, // HIDL v3.2 + ANDROID_SCALER_AVAILABLE_FORMATS_RAW_OPAQUE = 0x24, // HIDL v3.2 + ANDROID_SCALER_AVAILABLE_FORMATS_YV12 = 0x32315659, // HIDL v3.2 + ANDROID_SCALER_AVAILABLE_FORMATS_YCrCb_420_SP = 0x11, // HIDL v3.2 + ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED = 0x22, // HIDL v3.2 + ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888 = 0x23, // HIDL v3.2 + ANDROID_SCALER_AVAILABLE_FORMATS_BLOB = 0x21, // HIDL v3.2 +} camera_metadata_enum_android_scaler_available_formats_t; + +// ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS +typedef enum camera_metadata_enum_android_scaler_available_stream_configurations { + ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT , // HIDL v3.2 + ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_INPUT , // HIDL v3.2 +} camera_metadata_enum_android_scaler_available_stream_configurations_t; + +// ANDROID_SCALER_CROPPING_TYPE +typedef enum camera_metadata_enum_android_scaler_cropping_type { + ANDROID_SCALER_CROPPING_TYPE_CENTER_ONLY , // HIDL v3.2 + ANDROID_SCALER_CROPPING_TYPE_FREEFORM , // HIDL v3.2 +} camera_metadata_enum_android_scaler_cropping_type_t; + + +// ANDROID_SENSOR_REFERENCE_ILLUMINANT1 +typedef enum camera_metadata_enum_android_sensor_reference_illuminant1 { + ANDROID_SENSOR_REFERENCE_ILLUMINANT1_DAYLIGHT = 1, // HIDL v3.2 + ANDROID_SENSOR_REFERENCE_ILLUMINANT1_FLUORESCENT = 2, // HIDL v3.2 + ANDROID_SENSOR_REFERENCE_ILLUMINANT1_TUNGSTEN = 3, // HIDL v3.2 + ANDROID_SENSOR_REFERENCE_ILLUMINANT1_FLASH = 4, // HIDL v3.2 + ANDROID_SENSOR_REFERENCE_ILLUMINANT1_FINE_WEATHER = 9, // HIDL v3.2 + ANDROID_SENSOR_REFERENCE_ILLUMINANT1_CLOUDY_WEATHER = 10, // HIDL v3.2 + ANDROID_SENSOR_REFERENCE_ILLUMINANT1_SHADE = 11, // HIDL v3.2 + ANDROID_SENSOR_REFERENCE_ILLUMINANT1_DAYLIGHT_FLUORESCENT = 12, // HIDL v3.2 + ANDROID_SENSOR_REFERENCE_ILLUMINANT1_DAY_WHITE_FLUORESCENT = 13, // HIDL v3.2 + ANDROID_SENSOR_REFERENCE_ILLUMINANT1_COOL_WHITE_FLUORESCENT = 14, // HIDL v3.2 + ANDROID_SENSOR_REFERENCE_ILLUMINANT1_WHITE_FLUORESCENT = 15, // HIDL v3.2 + ANDROID_SENSOR_REFERENCE_ILLUMINANT1_STANDARD_A = 17, // HIDL v3.2 + ANDROID_SENSOR_REFERENCE_ILLUMINANT1_STANDARD_B = 18, // HIDL v3.2 + ANDROID_SENSOR_REFERENCE_ILLUMINANT1_STANDARD_C = 19, // HIDL v3.2 + 
ANDROID_SENSOR_REFERENCE_ILLUMINANT1_D55 = 20, // HIDL v3.2 + ANDROID_SENSOR_REFERENCE_ILLUMINANT1_D65 = 21, // HIDL v3.2 + ANDROID_SENSOR_REFERENCE_ILLUMINANT1_D75 = 22, // HIDL v3.2 + ANDROID_SENSOR_REFERENCE_ILLUMINANT1_D50 = 23, // HIDL v3.2 + ANDROID_SENSOR_REFERENCE_ILLUMINANT1_ISO_STUDIO_TUNGSTEN = 24, // HIDL v3.2 +} camera_metadata_enum_android_sensor_reference_illuminant1_t; + +// ANDROID_SENSOR_TEST_PATTERN_MODE +typedef enum camera_metadata_enum_android_sensor_test_pattern_mode { + ANDROID_SENSOR_TEST_PATTERN_MODE_OFF , // HIDL v3.2 + ANDROID_SENSOR_TEST_PATTERN_MODE_SOLID_COLOR , // HIDL v3.2 + ANDROID_SENSOR_TEST_PATTERN_MODE_COLOR_BARS , // HIDL v3.2 + ANDROID_SENSOR_TEST_PATTERN_MODE_COLOR_BARS_FADE_TO_GRAY , // HIDL v3.2 + ANDROID_SENSOR_TEST_PATTERN_MODE_PN9 , // HIDL v3.2 + ANDROID_SENSOR_TEST_PATTERN_MODE_CUSTOM1 = 256, // HIDL v3.2 +} camera_metadata_enum_android_sensor_test_pattern_mode_t; + + +// ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT +typedef enum camera_metadata_enum_android_sensor_info_color_filter_arrangement { + ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_RGGB , // HIDL v3.2 + ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_GRBG , // HIDL v3.2 + ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_GBRG , // HIDL v3.2 + ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_BGGR , // HIDL v3.2 + ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_RGB , // HIDL v3.2 +} camera_metadata_enum_android_sensor_info_color_filter_arrangement_t; + +// ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE +typedef enum camera_metadata_enum_android_sensor_info_timestamp_source { + ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE_UNKNOWN , // HIDL v3.2 + ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE_REALTIME , // HIDL v3.2 +} camera_metadata_enum_android_sensor_info_timestamp_source_t; + +// ANDROID_SENSOR_INFO_LENS_SHADING_APPLIED +typedef enum camera_metadata_enum_android_sensor_info_lens_shading_applied { + ANDROID_SENSOR_INFO_LENS_SHADING_APPLIED_FALSE , // HIDL v3.2 + ANDROID_SENSOR_INFO_LENS_SHADING_APPLIED_TRUE , // HIDL v3.2 +} camera_metadata_enum_android_sensor_info_lens_shading_applied_t; + + +// ANDROID_SHADING_MODE +typedef enum camera_metadata_enum_android_shading_mode { + ANDROID_SHADING_MODE_OFF , // HIDL v3.2 + ANDROID_SHADING_MODE_FAST , // HIDL v3.2 + ANDROID_SHADING_MODE_HIGH_QUALITY , // HIDL v3.2 +} camera_metadata_enum_android_shading_mode_t; + + +// ANDROID_STATISTICS_FACE_DETECT_MODE +typedef enum camera_metadata_enum_android_statistics_face_detect_mode { + ANDROID_STATISTICS_FACE_DETECT_MODE_OFF , // HIDL v3.2 + ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE , // HIDL v3.2 + ANDROID_STATISTICS_FACE_DETECT_MODE_FULL , // HIDL v3.2 +} camera_metadata_enum_android_statistics_face_detect_mode_t; + +// ANDROID_STATISTICS_HISTOGRAM_MODE +typedef enum camera_metadata_enum_android_statistics_histogram_mode { + ANDROID_STATISTICS_HISTOGRAM_MODE_OFF , // HIDL v3.2 + ANDROID_STATISTICS_HISTOGRAM_MODE_ON , // HIDL v3.2 +} camera_metadata_enum_android_statistics_histogram_mode_t; + +// ANDROID_STATISTICS_SHARPNESS_MAP_MODE +typedef enum camera_metadata_enum_android_statistics_sharpness_map_mode { + ANDROID_STATISTICS_SHARPNESS_MAP_MODE_OFF , // HIDL v3.2 + ANDROID_STATISTICS_SHARPNESS_MAP_MODE_ON , // HIDL v3.2 +} camera_metadata_enum_android_statistics_sharpness_map_mode_t; + +// ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE +typedef enum camera_metadata_enum_android_statistics_hot_pixel_map_mode { + ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE_OFF , // HIDL v3.2 + ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE_ON , // HIDL v3.2 +} 
camera_metadata_enum_android_statistics_hot_pixel_map_mode_t; + +// ANDROID_STATISTICS_SCENE_FLICKER +typedef enum camera_metadata_enum_android_statistics_scene_flicker { + ANDROID_STATISTICS_SCENE_FLICKER_NONE , // HIDL v3.2 + ANDROID_STATISTICS_SCENE_FLICKER_50HZ , // HIDL v3.2 + ANDROID_STATISTICS_SCENE_FLICKER_60HZ , // HIDL v3.2 +} camera_metadata_enum_android_statistics_scene_flicker_t; + +// ANDROID_STATISTICS_LENS_SHADING_MAP_MODE +typedef enum camera_metadata_enum_android_statistics_lens_shading_map_mode { + ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF , // HIDL v3.2 + ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_ON , // HIDL v3.2 +} camera_metadata_enum_android_statistics_lens_shading_map_mode_t; + +// ANDROID_STATISTICS_OIS_DATA_MODE +typedef enum camera_metadata_enum_android_statistics_ois_data_mode { + ANDROID_STATISTICS_OIS_DATA_MODE_OFF , // HIDL v3.3 + ANDROID_STATISTICS_OIS_DATA_MODE_ON , // HIDL v3.3 +} camera_metadata_enum_android_statistics_ois_data_mode_t; + + + +// ANDROID_TONEMAP_MODE +typedef enum camera_metadata_enum_android_tonemap_mode { + ANDROID_TONEMAP_MODE_CONTRAST_CURVE , // HIDL v3.2 + ANDROID_TONEMAP_MODE_FAST , // HIDL v3.2 + ANDROID_TONEMAP_MODE_HIGH_QUALITY , // HIDL v3.2 + ANDROID_TONEMAP_MODE_GAMMA_VALUE , // HIDL v3.2 + ANDROID_TONEMAP_MODE_PRESET_CURVE , // HIDL v3.2 +} camera_metadata_enum_android_tonemap_mode_t; + +// ANDROID_TONEMAP_PRESET_CURVE +typedef enum camera_metadata_enum_android_tonemap_preset_curve { + ANDROID_TONEMAP_PRESET_CURVE_SRGB , // HIDL v3.2 + ANDROID_TONEMAP_PRESET_CURVE_REC709 , // HIDL v3.2 +} camera_metadata_enum_android_tonemap_preset_curve_t; + + +// ANDROID_LED_TRANSMIT +typedef enum camera_metadata_enum_android_led_transmit { + ANDROID_LED_TRANSMIT_OFF , // HIDL v3.2 + ANDROID_LED_TRANSMIT_ON , // HIDL v3.2 +} camera_metadata_enum_android_led_transmit_t; + +// ANDROID_LED_AVAILABLE_LEDS +typedef enum camera_metadata_enum_android_led_available_leds { + ANDROID_LED_AVAILABLE_LEDS_TRANSMIT , // HIDL v3.2 +} camera_metadata_enum_android_led_available_leds_t; + + +// ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL +typedef enum camera_metadata_enum_android_info_supported_hardware_level { + ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED , // HIDL v3.2 + ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_FULL , // HIDL v3.2 + ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LEGACY , // HIDL v3.2 + ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_3 , // HIDL v3.2 + ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_EXTERNAL , // HIDL v3.3 +} camera_metadata_enum_android_info_supported_hardware_level_t; + + +// ANDROID_BLACK_LEVEL_LOCK +typedef enum camera_metadata_enum_android_black_level_lock { + ANDROID_BLACK_LEVEL_LOCK_OFF , // HIDL v3.2 + ANDROID_BLACK_LEVEL_LOCK_ON , // HIDL v3.2 +} camera_metadata_enum_android_black_level_lock_t; + + +// ANDROID_SYNC_FRAME_NUMBER +typedef enum camera_metadata_enum_android_sync_frame_number { + ANDROID_SYNC_FRAME_NUMBER_CONVERGING = -1, // HIDL v3.2 + ANDROID_SYNC_FRAME_NUMBER_UNKNOWN = -2, // HIDL v3.2 +} camera_metadata_enum_android_sync_frame_number_t; + +// ANDROID_SYNC_MAX_LATENCY +typedef enum camera_metadata_enum_android_sync_max_latency { + ANDROID_SYNC_MAX_LATENCY_PER_FRAME_CONTROL = 0, // HIDL v3.2 + ANDROID_SYNC_MAX_LATENCY_UNKNOWN = -1, // HIDL v3.2 +} camera_metadata_enum_android_sync_max_latency_t; + + + +// ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS +typedef enum camera_metadata_enum_android_depth_available_depth_stream_configurations { + ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS_OUTPUT , // HIDL v3.2 + 
ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS_INPUT , // HIDL v3.2 +} camera_metadata_enum_android_depth_available_depth_stream_configurations_t; + +// ANDROID_DEPTH_DEPTH_IS_EXCLUSIVE +typedef enum camera_metadata_enum_android_depth_depth_is_exclusive { + ANDROID_DEPTH_DEPTH_IS_EXCLUSIVE_FALSE , // HIDL v3.2 + ANDROID_DEPTH_DEPTH_IS_EXCLUSIVE_TRUE , // HIDL v3.2 +} camera_metadata_enum_android_depth_depth_is_exclusive_t; + + +// ANDROID_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE +typedef enum camera_metadata_enum_android_logical_multi_camera_sensor_sync_type { + ANDROID_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE_APPROXIMATE , // HIDL v3.3 + ANDROID_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE_CALIBRATED , // HIDL v3.3 +} camera_metadata_enum_android_logical_multi_camera_sensor_sync_type_t; + + +// ANDROID_DISTORTION_CORRECTION_MODE +typedef enum camera_metadata_enum_android_distortion_correction_mode { + ANDROID_DISTORTION_CORRECTION_MODE_OFF , // HIDL v3.3 + ANDROID_DISTORTION_CORRECTION_MODE_FAST , // HIDL v3.3 + ANDROID_DISTORTION_CORRECTION_MODE_HIGH_QUALITY , // HIDL v3.3 +} camera_metadata_enum_android_distortion_correction_mode_t; + + diff --git a/spider-cam/libcamera/include/android/metadata/system/camera_vendor_tags.h b/spider-cam/libcamera/include/android/metadata/system/camera_vendor_tags.h new file mode 100644 index 0000000..167ff73 --- /dev/null +++ b/spider-cam/libcamera/include/android/metadata/system/camera_vendor_tags.h @@ -0,0 +1,159 @@ +/* SPDX-License-Identifier: Apache-2.0 */ +/* + * Copyright 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef SYSTEM_MEDIA_INCLUDE_ANDROID_CAMERA_VENDOR_TAGS_H +#define SYSTEM_MEDIA_INCLUDE_ANDROID_CAMERA_VENDOR_TAGS_H + +#ifdef __cplusplus +extern "C" { +#endif + +#define CAMERA_METADATA_VENDOR_TAG_BOUNDARY 0x80000000u +#define CAMERA_METADATA_INVALID_VENDOR_ID UINT64_MAX + +typedef uint64_t metadata_vendor_id_t; + +/** + * Vendor tags: + * + * This structure contains basic functions for enumerating an immutable set of + * vendor-defined camera metadata tags, and querying static information about + * their structure/type. The intended use of this information is to validate + * the structure of metadata returned by the camera HAL, and to allow vendor- + * defined metadata tags to be visible in application facing camera API. + */ +typedef struct vendor_tag_ops vendor_tag_ops_t; +struct vendor_tag_ops { + /** + * Get the number of vendor tags supported on this platform. Used to + * calculate the size of buffer needed for holding the array of all tags + * returned by get_all_tags(). This must return -1 on error. + */ + int (*get_tag_count)(const vendor_tag_ops_t *v); + + /** + * Fill an array with all of the supported vendor tags on this platform. + * get_tag_count() must return the number of tags supported, and + * tag_array will be allocated with enough space to hold the number of tags + * returned by get_tag_count(). 
+ */ + void (*get_all_tags)(const vendor_tag_ops_t *v, uint32_t *tag_array); + + /** + * Get the vendor section name for a vendor-specified entry tag. This will + * only be called for vendor-defined tags. + * + * The naming convention for the vendor-specific section names should + * follow a style similar to the Java package style. For example, + * CameraZoom Inc. must prefix their sections with "com.camerazoom." + * This must return NULL if the tag is outside the bounds of + * vendor-defined sections. + * + * There may be different vendor-defined tag sections, for example the + * phone maker, the chipset maker, and the camera module maker may each + * have their own "com.vendor."-prefixed section. + * + * The memory pointed to by the return value must remain valid for the + * lifetime of the module, and is owned by the module. + */ + const char *(*get_section_name)(const vendor_tag_ops_t *v, uint32_t tag); + + /** + * Get the tag name for a vendor-specified entry tag. This is only called + * for vendor-defined tags, and must return NULL if it is not a + * vendor-defined tag. + * + * The memory pointed to by the return value must remain valid for the + * lifetime of the module, and is owned by the module. + */ + const char *(*get_tag_name)(const vendor_tag_ops_t *v, uint32_t tag); + + /** + * Get tag type for a vendor-specified entry tag. The type returned must be + * a valid type defined in camera_metadata.h. This method is only called + * for tags >= CAMERA_METADATA_VENDOR_TAG_BOUNDARY, and must return + * -1 if the tag is outside the bounds of the vendor-defined sections. + */ + int (*get_tag_type)(const vendor_tag_ops_t *v, uint32_t tag); + + /* Reserved for future use. These must be initialized to NULL. */ + void* reserved[8]; +}; + +struct vendor_tag_cache_ops { + /** + * Get the number of vendor tags supported on this platform. Used to + * calculate the size of buffer needed for holding the array of all tags + * returned by get_all_tags(). This must return -1 on error. + */ + int (*get_tag_count)(metadata_vendor_id_t id); + + /** + * Fill an array with all of the supported vendor tags on this platform. + * get_tag_count() must return the number of tags supported, and + * tag_array will be allocated with enough space to hold the number of tags + * returned by get_tag_count(). + */ + void (*get_all_tags)(uint32_t *tag_array, metadata_vendor_id_t id); + + /** + * Get the vendor section name for a vendor-specified entry tag. This will + * only be called for vendor-defined tags. + * + * The naming convention for the vendor-specific section names should + * follow a style similar to the Java package style. For example, + * CameraZoom Inc. must prefix their sections with "com.camerazoom." + * This must return NULL if the tag is outside the bounds of + * vendor-defined sections. + * + * There may be different vendor-defined tag sections, for example the + * phone maker, the chipset maker, and the camera module maker may each + * have their own "com.vendor."-prefixed section. + * + * The memory pointed to by the return value must remain valid for the + * lifetime of the module, and is owned by the module. + */ + const char *(*get_section_name)(uint32_t tag, metadata_vendor_id_t id); + + /** + * Get the tag name for a vendor-specified entry tag. This is only called + * for vendor-defined tags, and must return NULL if it is not a + * vendor-defined tag. + * + * The memory pointed to by the return value must remain valid for the + * lifetime of the module, and is owned by the module. 
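The ops above define a two-step enumeration protocol: size the buffer with get_tag_count(), then fill it with get_all_tags(). A hedged sketch against vendor_tag_ops_t (function name illustrative, error handling trimmed):

    #include <stdint.h>
    #include <stdlib.h>

    /* Illustrative: enumerate all vendor tags through a vendor_tag_ops_t.
     * get_tag_count() returns -1 on error; the caller owns the array. */
    static uint32_t *enumerate_vendor_tags(const vendor_tag_ops_t *v, int *count)
    {
        *count = v->get_tag_count(v);
        if (*count < 0)
            return NULL;
        uint32_t *tags = calloc((size_t)*count, sizeof(*tags));
        if (tags)
            v->get_all_tags(v, tags); /* buffer sized from get_tag_count() */
        return tags;
    }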
+ */ + const char *(*get_tag_name)(uint32_t tag, metadata_vendor_id_t id); + + /** + * Get tag type for a vendor-specified entry tag. The type returned must be + * a valid type defined in camera_metadata.h. This method is only called + * for tags >= CAMERA_METADATA_VENDOR_TAG_BOUNDARY, and must return + * -1 if the tag is outside the bounds of the vendor-defined sections. + */ + int (*get_tag_type)(uint32_t tag, metadata_vendor_id_t id); + + /* Reserved for future use. These must be initialized to NULL. */ + void* reserved[8]; +}; + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* SYSTEM_MEDIA_INCLUDE_ANDROID_CAMERA_VENDOR_TAGS_H */ + diff --git a/spider-cam/libcamera/include/android/system/core/include/android/log.h b/spider-cam/libcamera/include/android/system/core/include/android/log.h new file mode 100644 index 0000000..dbad9dd --- /dev/null +++ b/spider-cam/libcamera/include/android/system/core/include/android/log.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: Apache-2.0 */ +/* + * Copyright (C) 2009 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _ANDROID_LOG_H +#define _ANDROID_LOG_H + +/****************************************************************** + * + * IMPORTANT NOTICE: + * + * This file is part of Android's set of stable system headers + * exposed by the Android NDK (Native Development Kit) since + * platform release 1.5 + * + * Third-party source AND binary code relies on the definitions + * here to be FROZEN ON ALL UPCOMING PLATFORM RELEASES. + * + * - DO NOT MODIFY ENUMS (EXCEPT IF YOU ADD NEW 32-BIT VALUES) + * - DO NOT MODIFY CONSTANTS OR FUNCTIONAL MACROS + * - DO NOT CHANGE THE SIGNATURE OF FUNCTIONS IN ANY WAY + * - DO NOT CHANGE THE LAYOUT OR SIZE OF STRUCTURES + */ + +/* + * Support routines to send messages to the Android in-kernel log buffer, + * which can later be accessed through the 'logcat' utility. + * + * Each log message must have + * - a priority + * - a log tag + * - some text + * + * The tag normally corresponds to the component that emits the log message, + * and should be reasonably small. + * + * Log message text may be truncated to less than an implementation-specific + * limit (e.g. 1023 characters max). + * + * Note that a newline character ("\n") will be appended automatically to your + * log message, if not already there. It is not possible to send several messages + * and have them appear on a single line in logcat. + * + * PLEASE USE LOGS WITH MODERATION: + * + * - Sending log messages eats CPU and slow down your application and the + * system. + * + * - The circular log buffer is pretty small (<64KB), sending many messages + * might push off other important log messages from the rest of the system. + * + * - In release builds, only send log messages to account for exceptional + * conditions. 
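The declarations that follow are the stable liblog entry points; per the moderation notes above, a typical call is one formatted message per exceptional event. A small usage sketch (tag and message text are illustrative):

    #include <android/log.h>

    /* Illustrative: one WARN-priority message; liblog appends the newline. */
    static void log_frame_drop(int dropped)
    {
        __android_log_print(ANDROID_LOG_WARN, "spider-cam",
                            "dropped %d preview frames", dropped);
    }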
+ * + * NOTE: These functions MUST be implemented by /system/lib/liblog.so + */ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Android log priority values, in ascending priority order. + */ +typedef enum android_LogPriority { + ANDROID_LOG_UNKNOWN = 0, + ANDROID_LOG_DEFAULT, /* only for SetMinPriority() */ + ANDROID_LOG_VERBOSE, + ANDROID_LOG_DEBUG, + ANDROID_LOG_INFO, + ANDROID_LOG_WARN, + ANDROID_LOG_ERROR, + ANDROID_LOG_FATAL, + ANDROID_LOG_SILENT, /* only for SetMinPriority(); must be last */ +} android_LogPriority; + +/* + * Send a simple string to the log. + */ +int __android_log_write(int prio, const char *tag, const char *text); + +/* + * Send a formatted string to the log, used like printf(fmt,...) + */ +int __android_log_print(int prio, const char *tag, const char *fmt, ...) +#if defined(__GNUC__) +#ifdef __USE_MINGW_ANSI_STDIO +#if __USE_MINGW_ANSI_STDIO + __attribute__ ((format(gnu_printf, 3, 4))) +#else + __attribute__ ((format(printf, 3, 4))) +#endif +#else + __attribute__ ((format(printf, 3, 4))) +#endif +#endif + ; + +/* + * A variant of __android_log_print() that takes a va_list to list + * additional parameters. + */ +int __android_log_vprint(int prio, const char *tag, + const char *fmt, va_list ap); + +/* + * Log an assertion failure and abort the process to have a chance + * to inspect it if a debugger is attached. This uses the FATAL priority. + */ +void __android_log_assert(const char *cond, const char *tag, + const char *fmt, ...) +#if defined(__GNUC__) + __attribute__ ((noreturn)) +#ifdef __USE_MINGW_ANSI_STDIO +#if __USE_MINGW_ANSI_STDIO + __attribute__ ((format(gnu_printf, 3, 4))) +#else + __attribute__ ((format(printf, 3, 4))) +#endif +#else + __attribute__ ((format(printf, 3, 4))) +#endif +#endif + ; + +#ifdef __cplusplus +} +#endif + +#endif /* _ANDROID_LOG_H */ diff --git a/spider-cam/libcamera/include/android/system/core/include/cutils/compiler.h b/spider-cam/libcamera/include/android/system/core/include/cutils/compiler.h new file mode 100644 index 0000000..efd1b1c --- /dev/null +++ b/spider-cam/libcamera/include/android/system/core/include/cutils/compiler.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: Apache-2.0 */ +/* + * Copyright (C) 2009 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_CUTILS_COMPILER_H +#define ANDROID_CUTILS_COMPILER_H + +/* + * helps the compiler's optimizer predicting branches + */ + +#ifdef __cplusplus +# define CC_LIKELY( exp ) (__builtin_expect( !!(exp), true )) +# define CC_UNLIKELY( exp ) (__builtin_expect( !!(exp), false )) +#else +# define CC_LIKELY( exp ) (__builtin_expect( !!(exp), 1 )) +# define CC_UNLIKELY( exp ) (__builtin_expect( !!(exp), 0 )) +#endif + +/** + * exports marked symbols + * + * if used on a C++ class declaration, this macro must be inserted + * after the "class" keyword. 
For instance: + * + * template + * class ANDROID_API Singleton { } + */ + +#define ANDROID_API __attribute__((visibility("default"))) + +#endif // ANDROID_CUTILS_COMPILER_H diff --git a/spider-cam/libcamera/include/android/system/core/include/cutils/native_handle.h b/spider-cam/libcamera/include/android/system/core/include/cutils/native_handle.h new file mode 100644 index 0000000..dbd3767 --- /dev/null +++ b/spider-cam/libcamera/include/android/system/core/include/cutils/native_handle.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: Apache-2.0 */ +/* + * Copyright (C) 2009 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NATIVE_HANDLE_H_ +#define NATIVE_HANDLE_H_ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define NATIVE_HANDLE_MAX_FDS 1024 +#define NATIVE_HANDLE_MAX_INTS 1024 + +/* Declare a char array for use with native_handle_init */ +#define NATIVE_HANDLE_DECLARE_STORAGE(name, maxFds, maxInts) \ + alignas(native_handle_t) char (name)[ \ + sizeof(native_handle_t) + sizeof(int) * ((maxFds) + (maxInts))] + +typedef struct native_handle +{ + int version; /* sizeof(native_handle_t) */ + int numFds; /* number of file-descriptors at &data[0] */ + int numInts; /* number of ints at &data[numFds] */ +#if defined(__clang__) +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wzero-length-array" +#endif + int data[0]; /* numFds + numInts ints */ +#if defined(__clang__) +#pragma clang diagnostic pop +#endif +} native_handle_t; + +typedef const native_handle_t* buffer_handle_t; + +/* + * native_handle_close + * + * closes the file descriptors contained in this native_handle_t + * + * return 0 on success, or a negative error code on failure + * + */ +int native_handle_close(const native_handle_t* h); + +/* + * native_handle_init + * + * Initializes a native_handle_t from storage. storage must be declared with + * NATIVE_HANDLE_DECLARE_STORAGE. numFds and numInts must not respectively + * exceed maxFds and maxInts used to declare the storage. + */ +native_handle_t* native_handle_init(char* storage, int numFds, int numInts); + +/* + * native_handle_create + * + * creates a native_handle_t and initializes it. must be destroyed with + * native_handle_delete(). + * + */ +native_handle_t* native_handle_create(int numFds, int numInts); + +/* + * native_handle_clone + * + * creates a native_handle_t and initializes it from another native_handle_t. + * Must be destroyed with native_handle_delete(). + * + */ +native_handle_t* native_handle_clone(const native_handle_t* handle); + +/* + * native_handle_delete + * + * frees a native_handle_t allocated with native_handle_create(). + * This ONLY frees the memory allocated for the native_handle_t, but doesn't + * close the file descriptors; which can be achieved with native_handle_close(). 
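Note the split lifecycle spelled out above: native_handle_close() releases the contained fds, while native_handle_delete() frees the struct. A hedged sketch of the full round trip (assumes valid fds were stored in h->data first):

    #include <cutils/native_handle.h>

    /* Illustrative lifecycle; a real caller dup()s fds into
     * h->data[0..numFds-1] between create and close. */
    static void native_handle_roundtrip(void)
    {
        native_handle_t *h = native_handle_create(2 /* fds */, 4 /* ints */);
        if (!h)
            return;
        native_handle_close(h);  /* closes the contained file descriptors */
        native_handle_delete(h); /* then frees the native_handle_t itself */
    }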
+ * + * return 0 on success, or a negative error code on failure + * + */ +int native_handle_delete(native_handle_t* h); + + +#ifdef __cplusplus +} +#endif + +#endif /* NATIVE_HANDLE_H_ */ diff --git a/spider-cam/libcamera/include/android/system/core/include/system/camera.h b/spider-cam/libcamera/include/android/system/core/include/system/camera.h new file mode 100644 index 0000000..9d69588 --- /dev/null +++ b/spider-cam/libcamera/include/android/system/core/include/system/camera.h @@ -0,0 +1,308 @@ +/* SPDX-License-Identifier: Apache-2.0 */ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef SYSTEM_CORE_INCLUDE_ANDROID_CAMERA_H +#define SYSTEM_CORE_INCLUDE_ANDROID_CAMERA_H + +#include +#include +#include +#include +#include +#include + +__BEGIN_DECLS + +/** + * A set of bit masks for specifying how the received preview frames are + * handled before the previewCallback() call. + * + * The least significant 3 bits of an "int" value are used for this purpose: + * + * ..... 0 0 0 + * ^ ^ ^ + * | | |---------> determine whether the callback is enabled or not + * | |-----------> determine whether the callback is one-shot or not + * |-------------> determine whether the frame is copied out or not + * + * WARNING: When a frame is sent directly without copying, it is the frame + * receiver's responsiblity to make sure that the frame data won't get + * corrupted by subsequent preview frames filled by the camera. This flag is + * recommended only when copying out data brings significant performance price + * and the handling/processing of the received frame data is always faster than + * the preview frame rate so that data corruption won't occur. + * + * For instance, + * 1. 0x00 disables the callback. In this case, copy out and one shot bits + * are ignored. + * 2. 0x01 enables a callback without copying out the received frames. A + * typical use case is the Camcorder application to avoid making costly + * frame copies. + * 3. 0x05 is enabling a callback with frame copied out repeatedly. A typical + * use case is the Camera application. + * 4. 0x07 is enabling a callback with frame copied out only once. A typical + * use case is the Barcode scanner application. 
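The worked values above decompose into the three masks declared just below; a one-line sketch of case 4 (barcode scanner):

    /* enable | copy-out | one-shot == 0x07, the barcode-scanner setting */
    int flags = CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK |
                CAMERA_FRAME_CALLBACK_FLAG_COPY_OUT_MASK |
                CAMERA_FRAME_CALLBACK_FLAG_ONE_SHOT_MASK;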
+ */ + +enum { + CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK = 0x01, + CAMERA_FRAME_CALLBACK_FLAG_ONE_SHOT_MASK = 0x02, + CAMERA_FRAME_CALLBACK_FLAG_COPY_OUT_MASK = 0x04, + /** Typical use cases */ + CAMERA_FRAME_CALLBACK_FLAG_NOOP = 0x00, + CAMERA_FRAME_CALLBACK_FLAG_CAMCORDER = 0x01, + CAMERA_FRAME_CALLBACK_FLAG_CAMERA = 0x05, + CAMERA_FRAME_CALLBACK_FLAG_BARCODE_SCANNER = 0x07 +}; + +/** msgType in notifyCallback and dataCallback functions */ +enum { + CAMERA_MSG_ERROR = 0x0001, // notifyCallback + CAMERA_MSG_SHUTTER = 0x0002, // notifyCallback + CAMERA_MSG_FOCUS = 0x0004, // notifyCallback + CAMERA_MSG_ZOOM = 0x0008, // notifyCallback + CAMERA_MSG_PREVIEW_FRAME = 0x0010, // dataCallback + CAMERA_MSG_VIDEO_FRAME = 0x0020, // data_timestamp_callback + CAMERA_MSG_POSTVIEW_FRAME = 0x0040, // dataCallback + CAMERA_MSG_RAW_IMAGE = 0x0080, // dataCallback + CAMERA_MSG_COMPRESSED_IMAGE = 0x0100, // dataCallback + CAMERA_MSG_RAW_IMAGE_NOTIFY = 0x0200, // dataCallback + // Preview frame metadata. This can be combined with + // CAMERA_MSG_PREVIEW_FRAME in dataCallback. For example, the apps can + // request FRAME and METADATA. Or the apps can request only FRAME or only + // METADATA. + CAMERA_MSG_PREVIEW_METADATA = 0x0400, // dataCallback + // Notify on autofocus start and stop. This is useful in continuous + // autofocus - FOCUS_MODE_CONTINUOUS_VIDEO and FOCUS_MODE_CONTINUOUS_PICTURE. + CAMERA_MSG_FOCUS_MOVE = 0x0800, // notifyCallback + CAMERA_MSG_ALL_MSGS = 0xFFFF +}; + +/** cmdType in sendCommand functions */ +enum { + CAMERA_CMD_START_SMOOTH_ZOOM = 1, + CAMERA_CMD_STOP_SMOOTH_ZOOM = 2, + + /** + * Set the clockwise rotation of preview display (setPreviewDisplay) in + * degrees. This affects the preview frames and the picture displayed after + * snapshot. This method is useful for portrait mode applications. Note + * that preview display of front-facing cameras is flipped horizontally + * before the rotation, that is, the image is reflected along the central + * vertical axis of the camera sensor. So the users can see themselves as + * looking into a mirror. + * + * This does not affect the order of byte array of + * CAMERA_MSG_PREVIEW_FRAME, CAMERA_MSG_VIDEO_FRAME, + * CAMERA_MSG_POSTVIEW_FRAME, CAMERA_MSG_RAW_IMAGE, or + * CAMERA_MSG_COMPRESSED_IMAGE. This is allowed to be set during preview + * since API level 14. + */ + CAMERA_CMD_SET_DISPLAY_ORIENTATION = 3, + + /** + * cmdType to disable/enable shutter sound. In sendCommand passing arg1 = + * 0 will disable, while passing arg1 = 1 will enable the shutter sound. + */ + CAMERA_CMD_ENABLE_SHUTTER_SOUND = 4, + + /* cmdType to play recording sound */ + CAMERA_CMD_PLAY_RECORDING_SOUND = 5, + + /** + * Start the face detection. This should be called after preview is started. + * The camera will notify the listener of CAMERA_MSG_FACE and the detected + * faces in the preview frame. The detected faces may be the same as the + * previous ones. Apps should call CAMERA_CMD_STOP_FACE_DETECTION to stop + * the face detection. This method is supported if CameraParameters + * KEY_MAX_NUM_HW_DETECTED_FACES or KEY_MAX_NUM_SW_DETECTED_FACES is + * bigger than 0. Hardware and software face detection should not be running + * at the same time. If the face detection has started, apps should not send + * this again. + * + * In hardware face detection mode, CameraParameters KEY_WHITE_BALANCE, + * KEY_FOCUS_AREAS and KEY_METERING_AREAS have no effect. + * + * arg1 is the face detection type. It can be CAMERA_FACE_DETECTION_HW or + * CAMERA_FACE_DETECTION_SW. 
If the type of face detection requested is not + * supported, the HAL must return BAD_VALUE. + */ + CAMERA_CMD_START_FACE_DETECTION = 6, + + /** + * Stop the face detection. + */ + CAMERA_CMD_STOP_FACE_DETECTION = 7, + + /** + * Enable/disable focus move callback (CAMERA_MSG_FOCUS_MOVE). Passing + * arg1 = 0 will disable, while passing arg1 = 1 will enable the callback. + */ + CAMERA_CMD_ENABLE_FOCUS_MOVE_MSG = 8, + + /** + * Ping camera service to see if camera hardware is released. + * + * When any camera method returns error, the client can use ping command + * to see if the camera has been taken away by other clients. If the result + * is NO_ERROR, it means the camera hardware is not released. If the result + * is not NO_ERROR, the camera has been released and the existing client + * can silently finish itself or show a dialog. + */ + CAMERA_CMD_PING = 9, + + /** + * Configure the number of video buffers used for recording. The intended + * video buffer count for recording is passed as arg1, which must be + * greater than 0. This command must be sent before recording is started. + * This command returns INVALID_OPERATION error if it is sent after video + * recording is started, or the command is not supported at all. This + * command also returns a BAD_VALUE error if the intended video buffer + * count is non-positive or too big to be realized. + */ + CAMERA_CMD_SET_VIDEO_BUFFER_COUNT = 10, + + /** + * Configure an explicit format to use for video recording metadata mode. + * This can be used to switch the format from the + * default IMPLEMENTATION_DEFINED gralloc format to some other + * device-supported format, and the default dataspace from the BT_709 color + * space to some other device-supported dataspace. arg1 is the HAL pixel + * format, and arg2 is the HAL dataSpace. This command returns + * INVALID_OPERATION error if it is sent after video recording is started, + * or the command is not supported at all. + * + * If the gralloc format is set to a format other than + * IMPLEMENTATION_DEFINED, then HALv3 devices will use gralloc usage flags + * of SW_READ_OFTEN. + */ + CAMERA_CMD_SET_VIDEO_FORMAT = 11 +}; + +/** camera fatal errors */ +enum { + CAMERA_ERROR_UNKNOWN = 1, + /** + * Camera was released because another client has connected to the camera. + * The original client should call Camera::disconnect immediately after + * getting this notification. Otherwise, the camera will be released by + * camera service in a short time. The client should not call any method + * (except disconnect and sending CAMERA_CMD_PING) after getting this. + */ + CAMERA_ERROR_RELEASED = 2, + + /** + * Camera was released because device policy change or the client application + * is going to background. The client should call Camera::disconnect + * immediately after getting this notification. Otherwise, the camera will be + * released by camera service in a short time. The client should not call any + * method (except disconnect and sending CAMERA_CMD_PING) after getting this. + */ + CAMERA_ERROR_DISABLED = 3, + CAMERA_ERROR_SERVER_DIED = 100 +}; + +enum { + /** The facing of the camera is opposite to that of the screen. */ + CAMERA_FACING_BACK = 0, + /** The facing of the camera is the same as that of the screen. */ + CAMERA_FACING_FRONT = 1, + /** + * The facing of the camera is not fixed relative to the screen. + * The cameras with this facing are external cameras, e.g. USB cameras. + */ + CAMERA_FACING_EXTERNAL = 2 +}; + +enum { + /** Hardware face detection. It does not use much CPU. 
*/ + CAMERA_FACE_DETECTION_HW = 0, + /** + * Software face detection. It uses some CPU. Applications must use + * Camera.setPreviewTexture for preview in this mode. + */ + CAMERA_FACE_DETECTION_SW = 1 +}; + +/** + * The information of a face from camera face detection. + */ +typedef struct camera_face { + /** + * Bounds of the face [left, top, right, bottom]. (-1000, -1000) represents + * the top-left of the camera field of view, and (1000, 1000) represents the + * bottom-right of the field of view. The width and height cannot be 0 or + * negative. This is supported by both hardware and software face detection. + * + * The direction is relative to the sensor orientation, that is, what the + * sensor sees. The direction is not affected by the rotation or mirroring + * of CAMERA_CMD_SET_DISPLAY_ORIENTATION. + */ + int32_t rect[4]; + + /** + * The confidence level of the face. The range is 1 to 100. 100 is the + * highest confidence. This is supported by both hardware and software + * face detection. + */ + int32_t score; + + /** + * An unique id per face while the face is visible to the tracker. If + * the face leaves the field-of-view and comes back, it will get a new + * id. If the value is 0, id is not supported. + */ + int32_t id; + + /** + * The coordinates of the center of the left eye. The range is -1000 to + * 1000. -2000, -2000 if this is not supported. + */ + int32_t left_eye[2]; + + /** + * The coordinates of the center of the right eye. The range is -1000 to + * 1000. -2000, -2000 if this is not supported. + */ + int32_t right_eye[2]; + + /** + * The coordinates of the center of the mouth. The range is -1000 to 1000. + * -2000, -2000 if this is not supported. + */ + int32_t mouth[2]; + +} camera_face_t; + +/** + * The metadata of the frame data. + */ +typedef struct camera_frame_metadata { + /** + * The number of detected faces in the frame. + */ + int32_t number_of_faces; + + /** + * An array of the detected faces. The length is number_of_faces. + */ + camera_face_t *faces; +} camera_frame_metadata_t; + +__END_DECLS + +#endif /* SYSTEM_CORE_INCLUDE_ANDROID_CAMERA_H */ diff --git a/spider-cam/libcamera/include/android/system/core/include/system/graphics-base-v1.0.h b/spider-cam/libcamera/include/android/system/core/include/system/graphics-base-v1.0.h new file mode 100644 index 0000000..7548d87 --- /dev/null +++ b/spider-cam/libcamera/include/android/system/core/include/system/graphics-base-v1.0.h @@ -0,0 +1,141 @@ +/* SPDX-License-Identifier: Apache-2.0 */ +// This file is autogenerated by hidl-gen. Do not edit manually. 
+// Source: android.hardware.graphics.common@1.0 +// Location: hardware/interfaces/graphics/common/1.0/ + +#ifndef HIDL_GENERATED_ANDROID_HARDWARE_GRAPHICS_COMMON_V1_0_EXPORTED_CONSTANTS_H_ +#define HIDL_GENERATED_ANDROID_HARDWARE_GRAPHICS_COMMON_V1_0_EXPORTED_CONSTANTS_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum { + HAL_PIXEL_FORMAT_RGBA_8888 = 1, + HAL_PIXEL_FORMAT_RGBX_8888 = 2, + HAL_PIXEL_FORMAT_RGB_888 = 3, + HAL_PIXEL_FORMAT_RGB_565 = 4, + HAL_PIXEL_FORMAT_BGRA_8888 = 5, + HAL_PIXEL_FORMAT_YCBCR_422_SP = 16, + HAL_PIXEL_FORMAT_YCRCB_420_SP = 17, + HAL_PIXEL_FORMAT_YCBCR_422_I = 20, + HAL_PIXEL_FORMAT_RGBA_FP16 = 22, + HAL_PIXEL_FORMAT_RAW16 = 32, + HAL_PIXEL_FORMAT_BLOB = 33, + HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED = 34, + HAL_PIXEL_FORMAT_YCBCR_420_888 = 35, + HAL_PIXEL_FORMAT_RAW_OPAQUE = 36, + HAL_PIXEL_FORMAT_RAW10 = 37, + HAL_PIXEL_FORMAT_RAW12 = 38, + HAL_PIXEL_FORMAT_RGBA_1010102 = 43, + HAL_PIXEL_FORMAT_Y8 = 538982489, + HAL_PIXEL_FORMAT_Y16 = 540422489, + HAL_PIXEL_FORMAT_YV12 = 842094169, +} android_pixel_format_t; + +typedef enum { + HAL_TRANSFORM_FLIP_H = 1, // (1 << 0) + HAL_TRANSFORM_FLIP_V = 2, // (1 << 1) + HAL_TRANSFORM_ROT_90 = 4, // (1 << 2) + HAL_TRANSFORM_ROT_180 = 3, // (FLIP_H | FLIP_V) + HAL_TRANSFORM_ROT_270 = 7, // ((FLIP_H | FLIP_V) | ROT_90) +} android_transform_t; + +typedef enum { + HAL_DATASPACE_UNKNOWN = 0, + HAL_DATASPACE_ARBITRARY = 1, + HAL_DATASPACE_STANDARD_SHIFT = 16, + HAL_DATASPACE_STANDARD_MASK = 4128768, // (63 << STANDARD_SHIFT) + HAL_DATASPACE_STANDARD_UNSPECIFIED = 0, // (0 << STANDARD_SHIFT) + HAL_DATASPACE_STANDARD_BT709 = 65536, // (1 << STANDARD_SHIFT) + HAL_DATASPACE_STANDARD_BT601_625 = 131072, // (2 << STANDARD_SHIFT) + HAL_DATASPACE_STANDARD_BT601_625_UNADJUSTED = 196608, // (3 << STANDARD_SHIFT) + HAL_DATASPACE_STANDARD_BT601_525 = 262144, // (4 << STANDARD_SHIFT) + HAL_DATASPACE_STANDARD_BT601_525_UNADJUSTED = 327680, // (5 << STANDARD_SHIFT) + HAL_DATASPACE_STANDARD_BT2020 = 393216, // (6 << STANDARD_SHIFT) + HAL_DATASPACE_STANDARD_BT2020_CONSTANT_LUMINANCE = 458752, // (7 << STANDARD_SHIFT) + HAL_DATASPACE_STANDARD_BT470M = 524288, // (8 << STANDARD_SHIFT) + HAL_DATASPACE_STANDARD_FILM = 589824, // (9 << STANDARD_SHIFT) + HAL_DATASPACE_STANDARD_DCI_P3 = 655360, // (10 << STANDARD_SHIFT) + HAL_DATASPACE_STANDARD_ADOBE_RGB = 720896, // (11 << STANDARD_SHIFT) + HAL_DATASPACE_TRANSFER_SHIFT = 22, + HAL_DATASPACE_TRANSFER_MASK = 130023424, // (31 << TRANSFER_SHIFT) + HAL_DATASPACE_TRANSFER_UNSPECIFIED = 0, // (0 << TRANSFER_SHIFT) + HAL_DATASPACE_TRANSFER_LINEAR = 4194304, // (1 << TRANSFER_SHIFT) + HAL_DATASPACE_TRANSFER_SRGB = 8388608, // (2 << TRANSFER_SHIFT) + HAL_DATASPACE_TRANSFER_SMPTE_170M = 12582912, // (3 << TRANSFER_SHIFT) + HAL_DATASPACE_TRANSFER_GAMMA2_2 = 16777216, // (4 << TRANSFER_SHIFT) + HAL_DATASPACE_TRANSFER_GAMMA2_6 = 20971520, // (5 << TRANSFER_SHIFT) + HAL_DATASPACE_TRANSFER_GAMMA2_8 = 25165824, // (6 << TRANSFER_SHIFT) + HAL_DATASPACE_TRANSFER_ST2084 = 29360128, // (7 << TRANSFER_SHIFT) + HAL_DATASPACE_TRANSFER_HLG = 33554432, // (8 << TRANSFER_SHIFT) + HAL_DATASPACE_RANGE_SHIFT = 27, + HAL_DATASPACE_RANGE_MASK = 939524096, // (7 << RANGE_SHIFT) + HAL_DATASPACE_RANGE_UNSPECIFIED = 0, // (0 << RANGE_SHIFT) + HAL_DATASPACE_RANGE_FULL = 134217728, // (1 << RANGE_SHIFT) + HAL_DATASPACE_RANGE_LIMITED = 268435456, // (2 << RANGE_SHIFT) + HAL_DATASPACE_RANGE_EXTENDED = 402653184, // (3 << RANGE_SHIFT) + HAL_DATASPACE_SRGB_LINEAR = 512, + HAL_DATASPACE_V0_SRGB_LINEAR = 138477568, // 
((STANDARD_BT709 | TRANSFER_LINEAR) | RANGE_FULL) + HAL_DATASPACE_V0_SCRGB_LINEAR = + 406913024, // ((STANDARD_BT709 | TRANSFER_LINEAR) | RANGE_EXTENDED) + HAL_DATASPACE_SRGB = 513, + HAL_DATASPACE_V0_SRGB = 142671872, // ((STANDARD_BT709 | TRANSFER_SRGB) | RANGE_FULL) + HAL_DATASPACE_V0_SCRGB = 411107328, // ((STANDARD_BT709 | TRANSFER_SRGB) | RANGE_EXTENDED) + HAL_DATASPACE_JFIF = 257, + HAL_DATASPACE_V0_JFIF = 146931712, // ((STANDARD_BT601_625 | TRANSFER_SMPTE_170M) | RANGE_FULL) + HAL_DATASPACE_BT601_625 = 258, + HAL_DATASPACE_V0_BT601_625 = + 281149440, // ((STANDARD_BT601_625 | TRANSFER_SMPTE_170M) | RANGE_LIMITED) + HAL_DATASPACE_BT601_525 = 259, + HAL_DATASPACE_V0_BT601_525 = + 281280512, // ((STANDARD_BT601_525 | TRANSFER_SMPTE_170M) | RANGE_LIMITED) + HAL_DATASPACE_BT709 = 260, + HAL_DATASPACE_V0_BT709 = 281083904, // ((STANDARD_BT709 | TRANSFER_SMPTE_170M) | RANGE_LIMITED) + HAL_DATASPACE_DCI_P3_LINEAR = 139067392, // ((STANDARD_DCI_P3 | TRANSFER_LINEAR) | RANGE_FULL) + HAL_DATASPACE_DCI_P3 = 155844608, // ((STANDARD_DCI_P3 | TRANSFER_GAMMA2_6) | RANGE_FULL) + HAL_DATASPACE_DISPLAY_P3_LINEAR = + 139067392, // ((STANDARD_DCI_P3 | TRANSFER_LINEAR) | RANGE_FULL) + HAL_DATASPACE_DISPLAY_P3 = 143261696, // ((STANDARD_DCI_P3 | TRANSFER_SRGB) | RANGE_FULL) + HAL_DATASPACE_ADOBE_RGB = 151715840, // ((STANDARD_ADOBE_RGB | TRANSFER_GAMMA2_2) | RANGE_FULL) + HAL_DATASPACE_BT2020_LINEAR = 138805248, // ((STANDARD_BT2020 | TRANSFER_LINEAR) | RANGE_FULL) + HAL_DATASPACE_BT2020 = 147193856, // ((STANDARD_BT2020 | TRANSFER_SMPTE_170M) | RANGE_FULL) + HAL_DATASPACE_BT2020_PQ = 163971072, // ((STANDARD_BT2020 | TRANSFER_ST2084) | RANGE_FULL) + HAL_DATASPACE_DEPTH = 4096, + HAL_DATASPACE_SENSOR = 4097, +} android_dataspace_t; + +typedef enum { + HAL_COLOR_MODE_NATIVE = 0, + HAL_COLOR_MODE_STANDARD_BT601_625 = 1, + HAL_COLOR_MODE_STANDARD_BT601_625_UNADJUSTED = 2, + HAL_COLOR_MODE_STANDARD_BT601_525 = 3, + HAL_COLOR_MODE_STANDARD_BT601_525_UNADJUSTED = 4, + HAL_COLOR_MODE_STANDARD_BT709 = 5, + HAL_COLOR_MODE_DCI_P3 = 6, + HAL_COLOR_MODE_SRGB = 7, + HAL_COLOR_MODE_ADOBE_RGB = 8, + HAL_COLOR_MODE_DISPLAY_P3 = 9, +} android_color_mode_t; + +typedef enum { + HAL_COLOR_TRANSFORM_IDENTITY = 0, + HAL_COLOR_TRANSFORM_ARBITRARY_MATRIX = 1, + HAL_COLOR_TRANSFORM_VALUE_INVERSE = 2, + HAL_COLOR_TRANSFORM_GRAYSCALE = 3, + HAL_COLOR_TRANSFORM_CORRECT_PROTANOPIA = 4, + HAL_COLOR_TRANSFORM_CORRECT_DEUTERANOPIA = 5, + HAL_COLOR_TRANSFORM_CORRECT_TRITANOPIA = 6, +} android_color_transform_t; + +typedef enum { + HAL_HDR_DOLBY_VISION = 1, + HAL_HDR_HDR10 = 2, + HAL_HDR_HLG = 3, +} android_hdr_t; + +#ifdef __cplusplus +} +#endif + +#endif // HIDL_GENERATED_ANDROID_HARDWARE_GRAPHICS_COMMON_V1_0_EXPORTED_CONSTANTS_H_ diff --git a/spider-cam/libcamera/include/android/system/core/include/system/graphics-base-v1.1.h b/spider-cam/libcamera/include/android/system/core/include/system/graphics-base-v1.1.h new file mode 100644 index 0000000..3513072 --- /dev/null +++ b/spider-cam/libcamera/include/android/system/core/include/system/graphics-base-v1.1.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: Apache-2.0 */ +// This file is autogenerated by hidl-gen. Do not edit manually. 
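The packed V0 dataspace constants in graphics-base-v1.0.h above are plain ORs of the standard, transfer, and range bitfields, which can be checked at compile time — a sketch, assuming a C11 compiler and that the header is included:

    /* 0x10000 (BT709) | 0x800000 (SRGB) | 0x8000000 (FULL) == 142671872 */
    _Static_assert((HAL_DATASPACE_STANDARD_BT709 |
                    HAL_DATASPACE_TRANSFER_SRGB |
                    HAL_DATASPACE_RANGE_FULL) == HAL_DATASPACE_V0_SRGB,
                   "V0_SRGB is BT709 | SRGB | FULL");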
+// Source: android.hardware.graphics.common@1.1 +// Location: hardware/interfaces/graphics/common/1.1/ + +#ifndef HIDL_GENERATED_ANDROID_HARDWARE_GRAPHICS_COMMON_V1_1_EXPORTED_CONSTANTS_H_ +#define HIDL_GENERATED_ANDROID_HARDWARE_GRAPHICS_COMMON_V1_1_EXPORTED_CONSTANTS_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum { + HAL_PIXEL_FORMAT_DEPTH_16 = 48, + HAL_PIXEL_FORMAT_DEPTH_24 = 49, + HAL_PIXEL_FORMAT_DEPTH_24_STENCIL_8 = 50, + HAL_PIXEL_FORMAT_DEPTH_32F = 51, + HAL_PIXEL_FORMAT_DEPTH_32F_STENCIL_8 = 52, + HAL_PIXEL_FORMAT_STENCIL_8 = 53, + HAL_PIXEL_FORMAT_YCBCR_P010 = 54, +} android_pixel_format_v1_1_t; + +typedef enum { + HAL_DATASPACE_BT2020_ITU = + 281411584, // ((STANDARD_BT2020 | TRANSFER_SMPTE_170M) | RANGE_LIMITED) + HAL_DATASPACE_BT2020_ITU_PQ = + 298188800, // ((STANDARD_BT2020 | TRANSFER_ST2084) | RANGE_LIMITED) + HAL_DATASPACE_BT2020_ITU_HLG = 302383104, // ((STANDARD_BT2020 | TRANSFER_HLG) | RANGE_LIMITED) + HAL_DATASPACE_BT2020_HLG = 168165376, // ((STANDARD_BT2020 | TRANSFER_HLG) | RANGE_FULL) +} android_dataspace_v1_1_t; + +typedef enum { + HAL_COLOR_MODE_BT2020 = 10, + HAL_COLOR_MODE_BT2100_PQ = 11, + HAL_COLOR_MODE_BT2100_HLG = 12, +} android_color_mode_v1_1_t; + +typedef enum { + HAL_RENDER_INTENT_COLORIMETRIC = 0, + HAL_RENDER_INTENT_ENHANCE = 1, + HAL_RENDER_INTENT_TONE_MAP_COLORIMETRIC = 2, + HAL_RENDER_INTENT_TONE_MAP_ENHANCE = 3, +} android_render_intent_v1_1_t; + +#ifdef __cplusplus +} +#endif + +#endif // HIDL_GENERATED_ANDROID_HARDWARE_GRAPHICS_COMMON_V1_1_EXPORTED_CONSTANTS_H_ diff --git a/spider-cam/libcamera/include/android/system/core/include/system/graphics-base.h b/spider-cam/libcamera/include/android/system/core/include/system/graphics-base.h new file mode 100644 index 0000000..d01e987 --- /dev/null +++ b/spider-cam/libcamera/include/android/system/core/include/system/graphics-base.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: Apache-2.0 */ +#ifndef SYSTEM_CORE_GRAPHICS_BASE_H_ +#define SYSTEM_CORE_GRAPHICS_BASE_H_ + +#include "graphics-base-v1.0.h" +#include "graphics-base-v1.1.h" + +#endif // SYSTEM_CORE_GRAPHICS_BASE_H_ diff --git a/spider-cam/libcamera/include/android/system/core/include/system/graphics-sw.h b/spider-cam/libcamera/include/android/system/core/include/system/graphics-sw.h new file mode 100644 index 0000000..4a1cf82 --- /dev/null +++ b/spider-cam/libcamera/include/android/system/core/include/system/graphics-sw.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: Apache-2.0 */ +#ifndef SYSTEM_CORE_GRAPHICS_SW_H_ +#define SYSTEM_CORE_GRAPHICS_SW_H_ + +/* Software formats not in the HAL definitions. 
*/ +typedef enum { + HAL_PIXEL_FORMAT_YCBCR_422_888 = 39, // 0x27 + HAL_PIXEL_FORMAT_YCBCR_444_888 = 40, // 0x28 + HAL_PIXEL_FORMAT_FLEX_RGB_888 = 41, // 0x29 + HAL_PIXEL_FORMAT_FLEX_RGBA_8888 = 42, // 0x2A +} android_pixel_format_sw_t; + +/* for compatibility */ +#define HAL_PIXEL_FORMAT_YCbCr_422_888 HAL_PIXEL_FORMAT_YCBCR_422_888 +#define HAL_PIXEL_FORMAT_YCbCr_444_888 HAL_PIXEL_FORMAT_YCBCR_444_888 + +#endif // SYSTEM_CORE_GRAPHICS_SW_H_ diff --git a/spider-cam/libcamera/include/android/system/core/include/system/graphics.h b/spider-cam/libcamera/include/android/system/core/include/system/graphics.h new file mode 100644 index 0000000..6341e64 --- /dev/null +++ b/spider-cam/libcamera/include/android/system/core/include/system/graphics.h @@ -0,0 +1,269 @@ +/* SPDX-License-Identifier: Apache-2.0 */ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef SYSTEM_CORE_INCLUDE_ANDROID_GRAPHICS_H +#define SYSTEM_CORE_INCLUDE_ANDROID_GRAPHICS_H + +#include +#include + +/* + * Some of the enums are now defined in HIDL in hardware/interfaces and are + * generated. + */ +#include "graphics-base.h" +#include "graphics-sw.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* for compatibility */ +#define HAL_PIXEL_FORMAT_YCbCr_420_888 HAL_PIXEL_FORMAT_YCBCR_420_888 +#define HAL_PIXEL_FORMAT_YCbCr_422_SP HAL_PIXEL_FORMAT_YCBCR_422_SP +#define HAL_PIXEL_FORMAT_YCrCb_420_SP HAL_PIXEL_FORMAT_YCRCB_420_SP +#define HAL_PIXEL_FORMAT_YCbCr_422_I HAL_PIXEL_FORMAT_YCBCR_422_I +typedef android_pixel_format_t android_pixel_format; +typedef android_transform_t android_transform; +typedef android_dataspace_t android_dataspace; +typedef android_color_mode_t android_color_mode; +typedef android_color_transform_t android_color_transform; +typedef android_hdr_t android_hdr; + +/* + * If the HAL needs to create service threads to handle graphics related + * tasks, these threads need to run at HAL_PRIORITY_URGENT_DISPLAY priority + * if they can block the main rendering thread in any way. + * + * the priority of the current thread can be set with: + * + * #include + * setpriority(PRIO_PROCESS, 0, HAL_PRIORITY_URGENT_DISPLAY); + * + */ + +#define HAL_PRIORITY_URGENT_DISPLAY (-8) + +/* + * Structure for describing YCbCr formats for consumption by applications. + * This is used with HAL_PIXEL_FORMAT_YCbCr_*_888. + * + * Buffer chroma subsampling is defined in the format. + * e.g. HAL_PIXEL_FORMAT_YCbCr_420_888 has subsampling 4:2:0. + * + * Buffers must have a 8 bit depth. + * + * y, cb, and cr point to the first byte of their respective planes. + * + * Stride describes the distance in bytes from the first value of one row of + * the image to the first value of the next row. It includes the width of the + * image plus padding. + * ystride is the stride of the luma plane. + * cstride is the stride of the chroma planes. + * + * chroma_step is the distance in bytes from one chroma pixel value to the + * next. 
This is 2 bytes for semiplanar (because chroma values are interleaved + * and each chroma value is one byte) and 1 for planar. + */ + +struct android_ycbcr { + void *y; + void *cb; + void *cr; + size_t ystride; + size_t cstride; + size_t chroma_step; + + /** reserved for future use, set to 0 by gralloc's (*lock_ycbcr)() */ + uint32_t reserved[8]; +}; + +/* + * Structures for describing flexible YUVA/RGBA formats for consumption by + * applications. Such flexible formats contain a plane for each component (e.g. + * red, green, blue), where each plane is laid out in a grid-like pattern + * occupying unique byte addresses and with consistent byte offsets between + * neighboring pixels. + * + * The android_flex_layout structure is used with any pixel format that can be + * represented by it, such as: + * - HAL_PIXEL_FORMAT_YCbCr_*_888 + * - HAL_PIXEL_FORMAT_FLEX_RGB*_888 + * - HAL_PIXEL_FORMAT_RGB[AX]_888[8],BGRA_8888,RGB_888 + * - HAL_PIXEL_FORMAT_YV12,Y8,Y16,YCbCr_422_SP/I,YCrCb_420_SP + * - even implementation defined formats that can be represented by + * the structures + * + * Vertical increment (aka. row increment or stride) describes the distance in + * bytes from the first pixel of one row to the first pixel of the next row + * (below) for the component plane. This can be negative. + * + * Horizontal increment (aka. column or pixel increment) describes the distance + * in bytes from one pixel to the next pixel (to the right) on the same row for + * the component plane. This can be negative. + * + * Each plane can be subsampled either vertically or horizontally by + * a power-of-two factor. + * + * The bit-depth of each component can be arbitrary, as long as the pixels are + * laid out on whole bytes, in native byte-order, using the most significant + * bits of each unit. + */ + +typedef enum android_flex_component { + /* luma */ + FLEX_COMPONENT_Y = 1 << 0, + /* chroma blue */ + FLEX_COMPONENT_Cb = 1 << 1, + /* chroma red */ + FLEX_COMPONENT_Cr = 1 << 2, + + /* red */ + FLEX_COMPONENT_R = 1 << 10, + /* green */ + FLEX_COMPONENT_G = 1 << 11, + /* blue */ + FLEX_COMPONENT_B = 1 << 12, + + /* alpha */ + FLEX_COMPONENT_A = 1 << 30, +} android_flex_component_t; + +typedef struct android_flex_plane { + /* pointer to the first byte of the top-left pixel of the plane. */ + uint8_t *top_left; + + android_flex_component_t component; + + /* bits allocated for the component in each pixel. Must be a positive + multiple of 8. */ + int32_t bits_per_component; + /* number of the most significant bits used in the format for this + component. Must be between 1 and bits_per_component, inclusive. */ + int32_t bits_used; + + /* horizontal increment */ + int32_t h_increment; + /* vertical increment */ + int32_t v_increment; + /* horizontal subsampling. Must be a positive power of 2. */ + int32_t h_subsampling; + /* vertical subsampling. Must be a positive power of 2. 
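Given the stride and chroma_step fields of struct android_ycbcr documented above, addressing a chroma sample reduces to simple arithmetic; a hedged sketch for the 4:2:0 case (function name illustrative):

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative: byte address of the Cb sample covering pixel (x, y)
     * in a 4:2:0 android_ycbcr buffer; works for planar and semiplanar
     * alike, since chroma_step encodes the difference. */
    static uint8_t *ycbcr_420_cb_at(const struct android_ycbcr *img,
                                    size_t x, size_t y)
    {
        return (uint8_t *)img->cb + (y / 2) * img->cstride
                                  + (x / 2) * img->chroma_step;
    }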
*/ + int32_t v_subsampling; +} android_flex_plane_t; + +typedef enum android_flex_format { + /* not a flexible format */ + FLEX_FORMAT_INVALID = 0x0, + FLEX_FORMAT_Y = FLEX_COMPONENT_Y, + FLEX_FORMAT_YCbCr = FLEX_COMPONENT_Y | FLEX_COMPONENT_Cb | FLEX_COMPONENT_Cr, + FLEX_FORMAT_YCbCrA = FLEX_FORMAT_YCbCr | FLEX_COMPONENT_A, + FLEX_FORMAT_RGB = FLEX_COMPONENT_R | FLEX_COMPONENT_G | FLEX_COMPONENT_B, + FLEX_FORMAT_RGBA = FLEX_FORMAT_RGB | FLEX_COMPONENT_A, +} android_flex_format_t; + +typedef struct android_flex_layout { + /* the kind of flexible format */ + android_flex_format_t format; + + /* number of planes; 0 for FLEX_FORMAT_INVALID */ + uint32_t num_planes; + /* a plane for each component; ordered in increasing component value order. + E.g. FLEX_FORMAT_RGBA maps 0 -> R, 1 -> G, etc. + Can be NULL for FLEX_FORMAT_INVALID */ + android_flex_plane_t *planes; +} android_flex_layout_t; + +/** + * Structure used to define depth point clouds for format HAL_PIXEL_FORMAT_BLOB + * with dataSpace value of HAL_DATASPACE_DEPTH. + * When locking a native buffer of the above format and dataSpace value, + * the vaddr pointer can be cast to this structure. + * + * A variable-length list of (x,y,z, confidence) 3D points, as floats. (x, y, + * z) represents a measured point's position, with the coordinate system defined + * by the data source. Confidence represents the estimated likelihood that this + * measurement is correct. It is between 0.f and 1.f, inclusive, with 1.f == + * 100% confidence. + * + * num_points is the number of points in the list + * + * xyz_points is the flexible array of floating-point values. + * It contains (num_points) * 4 floats. + * + * For example: + * android_depth_points d = get_depth_buffer(); + * struct { + * float x; float y; float z; float confidence; + * } firstPoint, lastPoint; + * + * firstPoint.x = d.xyzc_points[0]; + * firstPoint.y = d.xyzc_points[1]; + * firstPoint.z = d.xyzc_points[2]; + * firstPoint.confidence = d.xyzc_points[3]; + * lastPoint.x = d.xyzc_points[(d.num_points - 1) * 4 + 0]; + * lastPoint.y = d.xyzc_points[(d.num_points - 1) * 4 + 1]; + * lastPoint.z = d.xyzc_points[(d.num_points - 1) * 4 + 2]; + * lastPoint.confidence = d.xyzc_points[(d.num_points - 1) * 4 + 3]; + */ + +struct android_depth_points { + uint32_t num_points; + + /** reserved for future use, set to 0 by gralloc's (*lock)() */ + uint32_t reserved[8]; + +#if defined(__clang__) +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wc99-extensions" +#endif + float xyzc_points[]; +#if defined(__clang__) +#pragma clang diagnostic pop +#endif +}; + +/** + * These structures are used to define the reference display's + * capabilities for HDR content. Display engine can use this + * to better tone map content to user's display. 
+ * Color is defined in CIE XYZ coordinates + */ +struct android_xy_color { + float x; + float y; +}; + +struct android_smpte2086_metadata { + struct android_xy_color displayPrimaryRed; + struct android_xy_color displayPrimaryGreen; + struct android_xy_color displayPrimaryBlue; + struct android_xy_color whitePoint; + float maxLuminance; + float minLuminance; +}; + +struct android_cta861_3_metadata { + float maxContentLightLevel; + float maxFrameAverageLightLevel; +}; + +#ifdef __cplusplus +} +#endif + +#endif /* SYSTEM_CORE_INCLUDE_ANDROID_GRAPHICS_H */ diff --git a/spider-cam/libcamera/include/libcamera/base/backtrace.h b/spider-cam/libcamera/include/libcamera/base/backtrace.h new file mode 100644 index 0000000..699ddd9 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/base/backtrace.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2021, Ideas on Board Oy + * + * Call stack backtraces + */ + +#pragma once + +#include +#include + +#include + +#include + +namespace libcamera { + +class Backtrace +{ +public: + Backtrace(); + + std::string toString(unsigned int skipLevels = 0) const; + +private: + LIBCAMERA_DISABLE_COPY(Backtrace) + + bool backtraceTrace(); + bool unwindTrace(); + + std::vector backtrace_; + std::vector backtraceText_; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/base/bound_method.h b/spider-cam/libcamera/include/libcamera/base/bound_method.h new file mode 100644 index 0000000..dd3488e --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/base/bound_method.h @@ -0,0 +1,224 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * Method bind and invocation + */ + +#pragma once + +#include +#include +#include +#include + +namespace libcamera { + +class Object; + +enum ConnectionType { + ConnectionTypeAuto, + ConnectionTypeDirect, + ConnectionTypeQueued, + ConnectionTypeBlocking, +}; + +class BoundMethodPackBase +{ +public: + virtual ~BoundMethodPackBase() = default; +}; + +template +class BoundMethodPack : public BoundMethodPackBase +{ +public: + BoundMethodPack(const Args &... args) + : args_(args...) + { + } + + R returnValue() + { + return ret_; + } + + std::tuple...> args_; + R ret_; +}; + +template +class BoundMethodPack : public BoundMethodPackBase +{ +public: + BoundMethodPack(const Args &... args) + : args_(args...) 
+ { + } + + void returnValue() + { + } + + std::tuple...> args_; +}; + +class BoundMethodBase +{ +public: + BoundMethodBase(void *obj, Object *object, ConnectionType type) + : obj_(obj), object_(object), connectionType_(type) + { + } + virtual ~BoundMethodBase() = default; + + template::value> * = nullptr> + bool match(T *obj) { return obj == obj_; } + bool match(Object *object) { return object == object_; } + + Object *object() const { return object_; } + + virtual void invokePack(BoundMethodPackBase *pack) = 0; + +protected: + bool activatePack(std::shared_ptr pack, + bool deleteMethod); + + void *obj_; + Object *object_; + +private: + ConnectionType connectionType_; +}; + +template +class BoundMethodArgs : public BoundMethodBase +{ +public: + using PackType = BoundMethodPack; + +private: + template + std::enable_if_t::value, void> + invokePack(BoundMethodPackBase *pack, std::index_sequence) + { + PackType *args = static_cast(pack); + args->ret_ = invoke(std::get(args->args_)...); + } + + template + std::enable_if_t::value, void> + invokePack(BoundMethodPackBase *pack, std::index_sequence) + { + /* args is effectively unused when the sequence I is empty. */ + PackType *args [[gnu::unused]] = static_cast(pack); + invoke(std::get(args->args_)...); + } + +public: + BoundMethodArgs(void *obj, Object *object, ConnectionType type) + : BoundMethodBase(obj, object, type) {} + + void invokePack(BoundMethodPackBase *pack) override + { + invokePack(pack, std::make_index_sequence{}); + } + + virtual R activate(Args... args, bool deleteMethod = false) = 0; + virtual R invoke(Args... args) = 0; +}; + +template +class BoundMethodFunctor : public BoundMethodArgs +{ +public: + using PackType = typename BoundMethodArgs::PackType; + + BoundMethodFunctor(T *obj, Object *object, Func func, + ConnectionType type = ConnectionTypeAuto) + : BoundMethodArgs(obj, object, type), func_(func) + { + } + + R activate(Args... args, bool deleteMethod = false) override + { + if (!this->object_) + return func_(args...); + + auto pack = std::make_shared(args...); + bool sync = BoundMethodBase::activatePack(pack, deleteMethod); + return sync ? pack->returnValue() : R(); + } + + R invoke(Args... args) override + { + return func_(args...); + } + +private: + Func func_; +}; + +template +class BoundMethodMember : public BoundMethodArgs +{ +public: + using PackType = typename BoundMethodArgs::PackType; + + BoundMethodMember(T *obj, Object *object, R (T::*func)(Args...), + ConnectionType type = ConnectionTypeAuto) + : BoundMethodArgs(obj, object, type), func_(func) + { + } + + bool match(R (T::*func)(Args...)) const { return func == func_; } + + R activate(Args... args, bool deleteMethod = false) override + { + if (!this->object_) { + T *obj = static_cast(this->obj_); + return (obj->*func_)(args...); + } + + auto pack = std::make_shared(args...); + bool sync = BoundMethodBase::activatePack(pack, deleteMethod); + return sync ? pack->returnValue() : R(); + } + + R invoke(Args... args) override + { + T *obj = static_cast(this->obj_); + return (obj->*func_)(args...); + } + +private: + R (T::*func_)(Args...); +}; + +template +class BoundMethodStatic : public BoundMethodArgs +{ +public: + BoundMethodStatic(R (*func)(Args...)) + : BoundMethodArgs(nullptr, nullptr, ConnectionTypeAuto), + func_(func) + { + } + + bool match(R (*func)(Args...)) const { return func == func_; } + + R activate(Args... args, [[maybe_unused]] bool deleteMethod = false) override + { + return (*func_)(args...); + } + + R invoke(Args...) 
override + { + return R(); + } + +private: + R (*func_)(Args...); +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/base/class.h b/spider-cam/libcamera/include/libcamera/base/class.h new file mode 100644 index 0000000..a808422 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/base/class.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Google Inc. + * + * Utilities and helpers for classes + */ + +#pragma once + +#include + +namespace libcamera { + +#ifndef __DOXYGEN__ +#define LIBCAMERA_DISABLE_COPY(klass) \ + klass(const klass &) = delete; \ + klass &operator=(const klass &) = delete; + +#define LIBCAMERA_DISABLE_MOVE(klass) \ + klass(klass &&) = delete; \ + klass &operator=(klass &&) = delete; + +#define LIBCAMERA_DISABLE_COPY_AND_MOVE(klass) \ + LIBCAMERA_DISABLE_COPY(klass) \ + LIBCAMERA_DISABLE_MOVE(klass) +#else +#define LIBCAMERA_DISABLE_COPY(klass) +#define LIBCAMERA_DISABLE_MOVE(klass) +#define LIBCAMERA_DISABLE_COPY_AND_MOVE(klass) +#endif + +#ifndef __DOXYGEN__ +#define LIBCAMERA_DECLARE_PRIVATE() \ +public: \ + class Private; \ + friend class Private; \ + template \ + const Private *_d() const \ + { \ + return Extensible::_d(); \ + } \ + template \ + Private *_d() \ + { \ + return Extensible::_d(); \ + } + +#define LIBCAMERA_DECLARE_PUBLIC(klass) \ + friend class klass; \ + using Public = klass; + +#define LIBCAMERA_O_PTR() \ + _o() + +#else +#define LIBCAMERA_DECLARE_PRIVATE() +#define LIBCAMERA_DECLARE_PUBLIC(klass) +#define LIBCAMERA_O_PTR() +#endif + +class Extensible +{ +public: + class Private + { + public: + Private(); + virtual ~Private(); + +#ifndef __DOXYGEN__ + template + const T *_o() const + { + return static_cast(o_); + } + + template + T *_o() + { + return static_cast(o_); + } +#endif + + private: + /* To initialize o_ from Extensible. */ + friend class Extensible; + Extensible *const o_; + }; + + Extensible(std::unique_ptr d); + +protected: + template + const T *_d() const + { + return static_cast(d_.get()); + } + + template + T *_d() + { + return static_cast(d_.get()); + } + +private: + const std::unique_ptr d_; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/base/compiler.h b/spider-cam/libcamera/include/libcamera/base/compiler.h new file mode 100644 index 0000000..fda8fdf --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/base/compiler.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2021, Google Inc. + * + * Compiler support + */ + +#pragma once + +#if __cplusplus >= 201703L +#define __nodiscard [[nodiscard]] +#else +#define __nodiscard +#endif diff --git a/spider-cam/libcamera/include/libcamera/base/event_dispatcher.h b/spider-cam/libcamera/include/libcamera/base/event_dispatcher.h new file mode 100644 index 0000000..e9a09c6 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/base/event_dispatcher.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. 
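+ *
+ * EventDispatcher is the abstract interface implemented for the library
+ * by EventDispatcherPoll (declared in event_dispatcher_poll.h). A sketch
+ * of driving a dispatcher manually, assuming the Thread class declared
+ * in thread.h and an illustrative running flag:
+ *
+ *   EventDispatcher *dispatcher = Thread::current()->eventDispatcher();
+ *   while (running)
+ *       dispatcher->processEvents();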
+ * + * Event dispatcher + */ + +#pragma once + +#include + +#include + +namespace libcamera { + +class EventNotifier; +class Timer; + +class EventDispatcher +{ +public: + virtual ~EventDispatcher(); + + virtual void registerEventNotifier(EventNotifier *notifier) = 0; + virtual void unregisterEventNotifier(EventNotifier *notifier) = 0; + + virtual void registerTimer(Timer *timer) = 0; + virtual void unregisterTimer(Timer *timer) = 0; + + virtual void processEvents() = 0; + + virtual void interrupt() = 0; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/base/event_dispatcher_poll.h b/spider-cam/libcamera/include/libcamera/base/event_dispatcher_poll.h new file mode 100644 index 0000000..1f7e05c --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/base/event_dispatcher_poll.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * Poll-based event dispatcher + */ + +#pragma once + +#include +#include +#include + +#include + +#include +#include + +struct pollfd; + +namespace libcamera { + +class EventNotifier; +class Timer; + +class EventDispatcherPoll final : public EventDispatcher +{ +public: + EventDispatcherPoll(); + ~EventDispatcherPoll(); + + void registerEventNotifier(EventNotifier *notifier); + void unregisterEventNotifier(EventNotifier *notifier); + + void registerTimer(Timer *timer); + void unregisterTimer(Timer *timer); + + void processEvents(); + void interrupt(); + +private: + struct EventNotifierSetPoll { + short events() const; + EventNotifier *notifiers[3]; + }; + + int poll(std::vector *pollfds); + void processInterrupt(const struct pollfd &pfd); + void processNotifiers(const std::vector &pollfds); + void processTimers(); + + std::map notifiers_; + std::list timers_; + UniqueFD eventfd_; + + bool processingEvents_; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/base/event_notifier.h b/spider-cam/libcamera/include/libcamera/base/event_notifier.h new file mode 100644 index 0000000..158f2d4 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/base/event_notifier.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * File descriptor event notifier + */ + +#pragma once + +#include + +#include +#include + +namespace libcamera { + +class Message; + +class EventNotifier : public Object +{ +public: + enum Type { + Read, + Write, + Exception, + }; + + EventNotifier(int fd, Type type, Object *parent = nullptr); + virtual ~EventNotifier(); + + Type type() const { return type_; } + int fd() const { return fd_; } + + bool enabled() const { return enabled_; } + void setEnabled(bool enable); + + Signal<> activated; + +protected: + void message(Message *msg) override; + +private: + int fd_; + Type type_; + bool enabled_; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/base/file.h b/spider-cam/libcamera/include/libcamera/base/file.h new file mode 100644 index 0000000..5637934 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/base/file.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Google Inc. 
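+ *
+ * A minimal usage sketch of the File class declared below; the path is
+ * illustrative and error handling is elided:
+ *
+ *   File file("/tmp/example.bin");
+ *   if (file.open(File::OpenModeFlag::ReadOnly)) {
+ *       Span<uint8_t> data = file.map();
+ *       ...
+ *       file.unmap(data.data());
+ *       file.close();
+ *   }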
+ * + * File I/O operations + */ + +#pragma once + +#include + +#include +#include + +#include + +#include +#include +#include +#include + +namespace libcamera { + +class File +{ +public: + enum class MapFlag { + NoOption = 0, + Private = (1 << 0), + }; + + using MapFlags = Flags; + + enum class OpenModeFlag { + NotOpen = 0, + ReadOnly = (1 << 0), + WriteOnly = (1 << 1), + ReadWrite = ReadOnly | WriteOnly, + }; + + using OpenMode = Flags; + + File(const std::string &name); + File(); + ~File(); + + const std::string &fileName() const { return name_; } + void setFileName(const std::string &name); + bool exists() const; + + bool open(OpenMode mode); + bool isOpen() const { return fd_.isValid(); } + OpenMode openMode() const { return mode_; } + void close(); + + int error() const { return error_; } + ssize_t size() const; + + off_t pos() const; + off_t seek(off_t pos); + + ssize_t read(const Span &data); + ssize_t write(const Span &data); + + Span map(off_t offset = 0, ssize_t size = -1, + MapFlags flags = MapFlag::NoOption); + bool unmap(uint8_t *addr); + + static bool exists(const std::string &name); + +private: + LIBCAMERA_DISABLE_COPY(File) + + void unmapAll(); + + std::string name_; + UniqueFD fd_; + OpenMode mode_; + + int error_; + std::map maps_; +}; + +LIBCAMERA_FLAGS_ENABLE_OPERATORS(File::MapFlag) +LIBCAMERA_FLAGS_ENABLE_OPERATORS(File::OpenModeFlag) + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/base/flags.h b/spider-cam/libcamera/include/libcamera/base/flags.h new file mode 100644 index 0000000..af4f6e3 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/base/flags.h @@ -0,0 +1,193 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Google Inc. + * + * Type-safe enum-based bitfields + */ + +#pragma once + +#include + +namespace libcamera { + +template +class Flags +{ +public: + static_assert(std::is_enum::value, + "Flags<> template parameter must be an enum"); + + using Type = std::underlying_type_t; + + constexpr Flags() + : value_(0) + { + } + + constexpr Flags(E flag) + : value_(static_cast(flag)) + { + } + + constexpr Flags &operator&=(E flag) + { + value_ &= static_cast(flag); + return *this; + } + + constexpr Flags &operator&=(Flags other) + { + value_ &= other.value_; + return *this; + } + + constexpr Flags &operator|=(E flag) + { + value_ |= static_cast(flag); + return *this; + } + + constexpr Flags &operator|=(Flags other) + { + value_ |= other.value_; + return *this; + } + + constexpr Flags &operator^=(E flag) + { + value_ ^= static_cast(flag); + return *this; + } + + constexpr Flags &operator^=(Flags other) + { + value_ ^= other.value_; + return *this; + } + + constexpr bool operator==(E flag) + { + return value_ == static_cast(flag); + } + + constexpr bool operator==(Flags other) + { + return value_ == static_cast(other); + } + + constexpr bool operator!=(E flag) + { + return value_ != static_cast(flag); + } + + constexpr bool operator!=(Flags other) + { + return value_ != static_cast(other); + } + + constexpr explicit operator Type() const + { + return value_; + } + + constexpr explicit operator bool() const + { + return !!value_; + } + + constexpr Flags operator&(E flag) const + { + return Flags(static_cast(value_ & static_cast(flag))); + } + + constexpr Flags operator&(Flags other) const + { + return Flags(static_cast(value_ & other.value_)); + } + + constexpr Flags operator|(E flag) const + { + return Flags(static_cast(value_ | static_cast(flag))); + } + + constexpr Flags operator|(Flags other) 
const + { + return Flags(static_cast(value_ | other.value_)); + } + + constexpr Flags operator^(E flag) const + { + return Flags(static_cast(value_ ^ static_cast(flag))); + } + + constexpr Flags operator^(Flags other) const + { + return Flags(static_cast(value_ ^ other.value_)); + } + + constexpr Flags operator~() const + { + return Flags(static_cast(~value_)); + } + + constexpr bool operator!() const + { + return !value_; + } + +private: + Type value_; +}; + +#ifndef __DOXYGEN__ +template +struct flags_enable_operators { + static const bool enable = false; +}; + +template +std::enable_if_t::enable, Flags> +operator|(E lhs, E rhs) +{ + using type = std::underlying_type_t; + return Flags(static_cast(static_cast(lhs) | static_cast(rhs))); +} + +template +std::enable_if_t::enable, Flags> +operator&(E lhs, E rhs) +{ + using type = std::underlying_type_t; + return Flags(static_cast(static_cast(lhs) & static_cast(rhs))); +} + +template +std::enable_if_t::enable, Flags> +operator^(E lhs, E rhs) +{ + using type = std::underlying_type_t; + return Flags(static_cast(static_cast(lhs) ^ static_cast(rhs))); +} + +template +std::enable_if_t::enable, Flags> +operator~(E rhs) +{ + using type = std::underlying_type_t; + return Flags(static_cast(~static_cast(rhs))); +} + +#define LIBCAMERA_FLAGS_ENABLE_OPERATORS(_enum) \ +template<> \ +struct flags_enable_operators<_enum> { \ + static const bool enable = true; \ +}; + +#else /* __DOXYGEN__ */ + +#define LIBCAMERA_FLAGS_ENABLE_OPERATORS(_enum) + +#endif /* __DOXYGEN__ */ + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/base/log.h b/spider-cam/libcamera/include/libcamera/base/log.h new file mode 100644 index 0000000..df27df0 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/base/log.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2018, Google Inc. + * + * Logging infrastructure + */ + +#pragma once + +#include +#include + +#include + +#include +#include + +namespace libcamera { + +enum LogSeverity { + LogInvalid = -1, + LogDebug = 0, + LogInfo, + LogWarning, + LogError, + LogFatal, +}; + +class LogCategory +{ +public: + static LogCategory *create(const char *name); + + const std::string &name() const { return name_; } + LogSeverity severity() const { return severity_; } + void setSeverity(LogSeverity severity); + + static const LogCategory &defaultCategory(); + +private: + explicit LogCategory(const char *name); + + const std::string name_; + LogSeverity severity_; +}; + +#define LOG_DECLARE_CATEGORY(name) \ +extern const LogCategory &_LOG_CATEGORY(name)(); + +#define LOG_DEFINE_CATEGORY(name) \ +LOG_DECLARE_CATEGORY(name) \ +const LogCategory &_LOG_CATEGORY(name)() \ +{ \ + /* The instance will be deleted by the Logger destructor. 
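+ *
+ * Both arities of the LOG() macro defined further below expand through
+ * _LOG_MACRO; for example (a sketch, MyCategory is an illustrative name):
+ *
+ *   LOG_DEFINE_CATEGORY(MyCategory)
+ *
+ *   LOG(MyCategory, Debug) << "frame queued";
+ *   LOG(Error) << "message on the default category";
+ *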
*/ \ + static LogCategory *category = LogCategory::create(#name); \ + return *category; \ +} + +class LogMessage +{ +public: + LogMessage(const char *fileName, unsigned int line, + const LogCategory &category, LogSeverity severity, + const std::string &prefix = std::string()); + + LogMessage(LogMessage &&); + ~LogMessage(); + + std::ostream &stream() { return msgStream_; } + + const utils::time_point ×tamp() const { return timestamp_; } + LogSeverity severity() const { return severity_; } + const LogCategory &category() const { return category_; } + const std::string &fileInfo() const { return fileInfo_; } + const std::string &prefix() const { return prefix_; } + const std::string msg() const { return msgStream_.str(); } + +private: + LIBCAMERA_DISABLE_COPY(LogMessage) + + void init(const char *fileName, unsigned int line); + + std::ostringstream msgStream_; + const LogCategory &category_; + LogSeverity severity_; + utils::time_point timestamp_; + std::string fileInfo_; + std::string prefix_; +}; + +class Loggable +{ +public: + virtual ~Loggable(); + +protected: + virtual std::string logPrefix() const = 0; + + LogMessage _log(const LogCategory *category, LogSeverity severity, + const char *fileName = __builtin_FILE(), + unsigned int line = __builtin_LINE()) const; +}; + +LogMessage _log(const LogCategory *category, LogSeverity severity, + const char *fileName = __builtin_FILE(), + unsigned int line = __builtin_LINE()); + +#ifndef __DOXYGEN__ +#define _LOG_CATEGORY(name) logCategory##name + +#define _LOG1(severity) \ + _log(nullptr, Log##severity).stream() +#define _LOG2(category, severity) \ + _log(&_LOG_CATEGORY(category)(), Log##severity).stream() + +/* + * Expand the LOG() macro to _LOG1() or _LOG2() based on the number of + * arguments. + */ +#define _LOG_MACRO(_1, _2, NAME, ...) NAME +#define LOG(...) 
_LOG_MACRO(__VA_ARGS__, _LOG2, _LOG1)(__VA_ARGS__) +#else /* __DOXYGEN___ */ +#define LOG(category, severity) +#endif /* __DOXYGEN__ */ + +#ifndef NDEBUG +#define ASSERT(condition) static_cast(({ \ + if (!(condition)) \ + LOG(Fatal) << "assertion \"" #condition "\" failed in " \ + << __func__ << "()"; \ +})) +#else +#define ASSERT(condition) static_cast(false && (condition)) +#endif + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/base/meson.build b/spider-cam/libcamera/include/libcamera/base/meson.build new file mode 100644 index 0000000..bace25d --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/base/meson.build @@ -0,0 +1,40 @@ +# SPDX-License-Identifier: CC0-1.0 + +libcamera_base_include_dir = libcamera_include_dir / 'base' + +libcamera_base_public_headers = files([ + 'bound_method.h', + 'class.h', + 'compiler.h', + 'flags.h', + 'object.h', + 'shared_fd.h', + 'signal.h', + 'span.h', + 'unique_fd.h', +]) + +libcamera_base_private_headers = files([ + 'backtrace.h', + 'event_dispatcher.h', + 'event_dispatcher_poll.h', + 'event_notifier.h', + 'file.h', + 'log.h', + 'message.h', + 'mutex.h', + 'private.h', + 'semaphore.h', + 'thread.h', + 'thread_annotations.h', + 'timer.h', + 'utils.h', +]) + +libcamera_base_headers = [ + libcamera_base_public_headers, + libcamera_base_private_headers, +] + +install_headers(libcamera_base_public_headers, + subdir : libcamera_base_include_dir) diff --git a/spider-cam/libcamera/include/libcamera/base/message.h b/spider-cam/libcamera/include/libcamera/base/message.h new file mode 100644 index 0000000..4b23203 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/base/message.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * Message queue support + */ + +#pragma once + +#include + +#include + +#include + +namespace libcamera { + +class BoundMethodBase; +class Object; +class Semaphore; +class Thread; + +class Message +{ +public: + enum Type { + None = 0, + InvokeMessage = 1, + ThreadMoveMessage = 2, + DeferredDelete = 3, + UserMessage = 1000, + }; + + Message(Type type); + virtual ~Message(); + + Type type() const { return type_; } + Object *receiver() const { return receiver_; } + + static Type registerMessageType(); + +private: + friend class Thread; + + Type type_; + Object *receiver_; + + static std::atomic_uint nextUserType_; +}; + +class InvokeMessage : public Message +{ +public: + InvokeMessage(BoundMethodBase *method, + std::shared_ptr pack, + Semaphore *semaphore = nullptr, + bool deleteMethod = false); + ~InvokeMessage(); + + Semaphore *semaphore() const { return semaphore_; } + + void invoke(); + +private: + BoundMethodBase *method_; + std::shared_ptr pack_; + Semaphore *semaphore_; + bool deleteMethod_; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/base/mutex.h b/spider-cam/libcamera/include/libcamera/base/mutex.h new file mode 100644 index 0000000..fa9a8d0 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/base/mutex.h @@ -0,0 +1,134 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2021, Google Inc. + * + * Mutex classes with clang thread safety annotation + */ + +#pragma once + +#include +#include + +#include + +#include + +namespace libcamera { + +/* \todo using Mutex = std::mutex if libc++ is used. 
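+ *
+ * A typical pattern with the classes defined below (a sketch; counter_
+ * is an illustrative member guarded by mutex_):
+ *
+ *   Mutex mutex_;
+ *   int counter_ LIBCAMERA_TSA_GUARDED_BY(mutex_);
+ *
+ *   void increment()
+ *   {
+ *       MutexLocker locker(mutex_);
+ *       counter_++;
+ *   }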
*/ + +#ifndef __DOXYGEN__ + +class LIBCAMERA_TSA_CAPABILITY("mutex") Mutex final +{ +public: + constexpr Mutex() + { + } + + void lock() LIBCAMERA_TSA_ACQUIRE() + { + mutex_.lock(); + } + + void unlock() LIBCAMERA_TSA_RELEASE() + { + mutex_.unlock(); + } + +private: + friend class MutexLocker; + + std::mutex mutex_; +}; + +class LIBCAMERA_TSA_SCOPED_CAPABILITY MutexLocker final +{ +public: + explicit MutexLocker(Mutex &mutex) LIBCAMERA_TSA_ACQUIRE(mutex) + : lock_(mutex.mutex_) + { + } + + MutexLocker(Mutex &mutex, std::defer_lock_t t) noexcept LIBCAMERA_TSA_EXCLUDES(mutex) + : lock_(mutex.mutex_, t) + { + } + + ~MutexLocker() LIBCAMERA_TSA_RELEASE() + { + } + + void lock() LIBCAMERA_TSA_ACQUIRE() + { + lock_.lock(); + } + + bool try_lock() LIBCAMERA_TSA_TRY_ACQUIRE(true) + { + return lock_.try_lock(); + } + + void unlock() LIBCAMERA_TSA_RELEASE() + { + lock_.unlock(); + } + +private: + friend class ConditionVariable; + + std::unique_lock lock_; +}; + +class ConditionVariable final +{ +public: + ConditionVariable() + { + } + + void notify_one() noexcept + { + cv_.notify_one(); + } + + void notify_all() noexcept + { + cv_.notify_all(); + } + + template + void wait(MutexLocker &locker, Predicate stopWaiting) + { + cv_.wait(locker.lock_, stopWaiting); + } + + template + bool wait_for(MutexLocker &locker, + const std::chrono::duration &relTime, + Predicate stopWaiting) + { + return cv_.wait_for(locker.lock_, relTime, stopWaiting); + } + +private: + std::condition_variable cv_; +}; + +#else /* __DOXYGEN__ */ + +class Mutex final +{ +}; + +class MutexLocker final +{ +}; + +class ConditionVariable final +{ +}; + +#endif /* __DOXYGEN__ */ +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/base/object.h b/spider-cam/libcamera/include/libcamera/base/object.h new file mode 100644 index 0000000..508773c --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/base/object.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * Base object + */ + +#pragma once + +#include +#include +#include + +#include + +namespace libcamera { + +class Message; +template +class Signal; +class SignalBase; +class Thread; + +class Object +{ +public: + Object(Object *parent = nullptr); + virtual ~Object(); + + void deleteLater(); + + void postMessage(std::unique_ptr msg); + + template::value> * = nullptr> + R invokeMethod(R (T::*func)(FuncArgs...), ConnectionType type, + Args&&... args) + { + T *obj = static_cast(this); + auto *method = new BoundMethodMember(obj, this, func, type); + return method->activate(args..., true); + } + + Thread *thread() const { return thread_; } + void moveToThread(Thread *thread); + + Object *parent() const { return parent_; } + +protected: + virtual void message(Message *msg); + + bool assertThreadBound(const char *message); + +private: + friend class SignalBase; + friend class Thread; + + void notifyThreadMove(); + + void connect(SignalBase *signal); + void disconnect(SignalBase *signal); + + Object *parent_; + std::vector children_; + + Thread *thread_; + std::list signals_; + unsigned int pendingMessages_; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/base/private.h b/spider-cam/libcamera/include/libcamera/base/private.h new file mode 100644 index 0000000..8670c40 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/base/private.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2021, Google Inc. 
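+ *
+ * Internal consumers are expected to be built with LIBCAMERA_BASE_PRIVATE
+ * defined (normally by the build system rather than by hand); a sketch of
+ * the guard in action:
+ *
+ *   #define LIBCAMERA_BASE_PRIVATE
+ *   #include <libcamera/base/private.h>   // accepted
+ *
+ * Without the definition, the #error below fires.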
+ * + * Private Header Validation + * + * A selection of internal libcamera headers are installed as part + * of the libcamera package to allow sharing of a select subset of + * internal functionality with IPA module only. + * + * This functionality is not considered part of the public libcamera + * API, and can therefore potentially face ABI instabilities which + * should not be exposed to applications. IPA modules however should be + * versioned and more closely matched to the libcamera installation. + * + * Components which include this file can not be included in any file + * which forms part of the libcamera API. + */ + +#ifndef LIBCAMERA_BASE_PRIVATE +#error "Private headers must not be included in the libcamera API" +#endif diff --git a/spider-cam/libcamera/include/libcamera/base/semaphore.h b/spider-cam/libcamera/include/libcamera/base/semaphore.h new file mode 100644 index 0000000..59d4aa4 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/base/semaphore.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * General-purpose counting semaphore + */ + +#pragma once + +#include + +#include + +namespace libcamera { + +class Semaphore +{ +public: + Semaphore(unsigned int n = 0); + + unsigned int available() LIBCAMERA_TSA_EXCLUDES(mutex_); + void acquire(unsigned int n = 1) LIBCAMERA_TSA_EXCLUDES(mutex_); + bool tryAcquire(unsigned int n = 1) LIBCAMERA_TSA_EXCLUDES(mutex_); + void release(unsigned int n = 1) LIBCAMERA_TSA_EXCLUDES(mutex_); + +private: + Mutex mutex_; + ConditionVariable cv_; + unsigned int available_ LIBCAMERA_TSA_GUARDED_BY(mutex_); +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/base/shared_fd.h b/spider-cam/libcamera/include/libcamera/base/shared_fd.h new file mode 100644 index 0000000..61fe11c --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/base/shared_fd.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * File descriptor wrapper with shared ownership + */ + +#pragma once + +#include + +namespace libcamera { + +class UniqueFD; + +class SharedFD final +{ +public: + explicit SharedFD(const int &fd = -1); + explicit SharedFD(int &&fd); + explicit SharedFD(UniqueFD fd); + SharedFD(const SharedFD &other); + SharedFD(SharedFD &&other); + ~SharedFD(); + + SharedFD &operator=(const SharedFD &other); + SharedFD &operator=(SharedFD &&other); + + bool isValid() const { return fd_ != nullptr; } + int get() const { return fd_ ? fd_->fd() : -1; } + UniqueFD dup() const; + +private: + class Descriptor + { + public: + Descriptor(int fd, bool duplicate); + ~Descriptor(); + + int fd() const { return fd_; } + + private: + int fd_; + }; + + std::shared_ptr fd_; +}; + +static inline bool operator==(const SharedFD &lhs, const SharedFD &rhs) +{ + return lhs.get() == rhs.get(); +} + +static inline bool operator!=(const SharedFD &lhs, const SharedFD &rhs) +{ + return !(lhs == rhs); +} + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/base/signal.h b/spider-cam/libcamera/include/libcamera/base/signal.h new file mode 100644 index 0000000..849fbbd --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/base/signal.h @@ -0,0 +1,158 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. 
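+ *
+ * A small usage sketch of the Signal class defined below; Producer and
+ * Consumer are illustrative Object-derived types:
+ *
+ *   class Producer : public Object
+ *   {
+ *   public:
+ *       Signal<int> valueChanged;
+ *   };
+ *
+ *   producer.valueChanged.connect(&consumer, &Consumer::onValueChanged);
+ *   producer.valueChanged.emit(42);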
+ * + * Signal & slot implementation + */ + +#pragma once + +#include +#include +#include +#include + +#include + +namespace libcamera { + +class Object; + +class SignalBase +{ +public: + void disconnect(Object *object); + +protected: + using SlotList = std::list; + + void connect(BoundMethodBase *slot); + void disconnect(std::function match); + + SlotList slots(); + +private: + SlotList slots_; +}; + +template +class Signal : public SignalBase +{ +public: + ~Signal() + { + disconnect(); + } + +#ifndef __DOXYGEN__ + template::value> * = nullptr> + void connect(T *obj, R (T::*func)(Args...), + ConnectionType type = ConnectionTypeAuto) + { + Object *object = static_cast(obj); + SignalBase::connect(new BoundMethodMember(obj, object, func, type)); + } + + template::value> * = nullptr> +#else + template +#endif + void connect(T *obj, R (T::*func)(Args...)) + { + SignalBase::connect(new BoundMethodMember(obj, nullptr, func)); + } + +#ifndef __DOXYGEN__ + template::value +#if __cplusplus >= 201703L + && std::is_invocable_v +#endif + > * = nullptr> + void connect(T *obj, Func func, ConnectionType type = ConnectionTypeAuto) + { + Object *object = static_cast(obj); + SignalBase::connect(new BoundMethodFunctor(obj, object, func, type)); + } + + template::value +#if __cplusplus >= 201703L + && std::is_invocable_v +#endif + > * = nullptr> +#else + template +#endif + void connect(T *obj, Func func) + { + SignalBase::connect(new BoundMethodFunctor(obj, nullptr, func)); + } + + template + void connect(R (*func)(Args...)) + { + SignalBase::connect(new BoundMethodStatic(func)); + } + + void disconnect() + { + SignalBase::disconnect([]([[maybe_unused]] SlotList::iterator &iter) { + return true; + }); + } + + template + void disconnect(T *obj) + { + SignalBase::disconnect([obj](SlotList::iterator &iter) { + return (*iter)->match(obj); + }); + } + + template + void disconnect(T *obj, R (T::*func)(Args...)) + { + SignalBase::disconnect([obj, func](SlotList::iterator &iter) { + BoundMethodArgs *slot = + static_cast *>(*iter); + + if (!slot->match(obj)) + return false; + + /* + * If the object matches the slot, the slot is + * guaranteed to be a member slot, so we can safely + * cast it to BoundMethodMember to match + * func. + */ + return static_cast *>(slot)->match(func); + }); + } + + template + void disconnect(R (*func)(Args...)) + { + SignalBase::disconnect([func](SlotList::iterator &iter) { + BoundMethodArgs *slot = + static_cast *>(*iter); + + if (!slot->match(nullptr)) + return false; + + return static_cast *>(slot)->match(func); + }); + } + + void emit(Args... args) + { + /* + * Make a copy of the slots list as the slot could call the + * disconnect operation, invalidating the iterator. + */ + for (BoundMethodBase *slot : slots()) + static_cast *>(slot)->activate(args...); + } +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/base/span.h b/spider-cam/libcamera/include/libcamera/base/span.h new file mode 100644 index 0000000..c3e63f6 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/base/span.h @@ -0,0 +1,421 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Google Inc. 
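+ *
+ * For example, a sketch of the Span class defined below:
+ *
+ *   int values[4] = { 1, 2, 3, 4 };
+ *   Span<int> span(values);           // dynamic extent, size() == 4
+ *   Span<int> head = span.first(2);   // refers to { 1, 2 }
+ *   Span<int> tail = span.last(2);    // refers to { 3, 4 }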
+ * + * C++20 std::span<> implementation for C++11 + */ + +#pragma once + +#include +#include +#include +#include +#include + +namespace libcamera { + +static constexpr std::size_t dynamic_extent = std::numeric_limits::max(); + +template +class Span; + +namespace details { + +template +struct is_array : public std::false_type { +}; + +template +struct is_array> : public std::true_type { +}; + +template +struct is_span : public std::false_type { +}; + +template +struct is_span> : public std::true_type { +}; + +} /* namespace details */ + +namespace utils { + +template +constexpr auto size(const C &c) -> decltype(c.size()) +{ + return c.size(); +} + +template +constexpr auto data(const C &c) -> decltype(c.data()) +{ + return c.data(); +} + +template +constexpr auto data(C &c) -> decltype(c.data()) +{ + return c.data(); +} + +template +constexpr T *data(T (&array)[N]) noexcept +{ + return array; +} + +template +struct tuple_element; + +template +struct tuple_element> { + using type = T; +}; + +template +struct tuple_size; + +template +struct tuple_size> : public std::integral_constant { +}; + +template +struct tuple_size>; + +} /* namespace utils */ + +template +class Span +{ +public: + using element_type = T; + using value_type = typename std::remove_cv_t; + using size_type = std::size_t; + using difference_type = std::ptrdiff_t; + using pointer = T *; + using const_pointer = const T *; + using reference = T &; + using const_reference = const T &; + using iterator = pointer; + using const_iterator = const_pointer; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + + static constexpr std::size_t extent = Extent; + + template> + constexpr Span() noexcept + : data_(nullptr) + { + } + + explicit constexpr Span(pointer ptr, [[maybe_unused]] size_type count) + : data_(ptr) + { + } + + explicit constexpr Span(pointer first, [[maybe_unused]] pointer last) + : data_(first) + { + } + + template + constexpr Span(element_type (&arr)[N], + std::enable_if_t (*)[], + element_type (*)[]>::value && + N == Extent, + std::nullptr_t> = nullptr) noexcept + : data_(arr) + { + } + + template + constexpr Span(std::array &arr, + std::enable_if_t (*)[], + element_type (*)[]>::value && + N == Extent, + std::nullptr_t> = nullptr) noexcept + : data_(arr.data()) + { + } + + template + constexpr Span(const std::array &arr, + std::enable_if_t (*)[], + element_type (*)[]>::value && + N == Extent, + std::nullptr_t> = nullptr) noexcept + : data_(arr.data()) + { + } + + template + explicit constexpr Span(Container &cont, + std::enable_if_t::value && + !details::is_array::value && + !std::is_array::value && + std::is_convertible (*)[], + element_type (*)[]>::value, + std::nullptr_t> = nullptr) + : data_(utils::data(cont)) + { + } + + template + explicit constexpr Span(const Container &cont, + std::enable_if_t::value && + !details::is_array::value && + !std::is_array::value && + std::is_convertible (*)[], + element_type (*)[]>::value, + std::nullptr_t> = nullptr) + : data_(utils::data(cont)) + { + static_assert(utils::size(cont) == Extent, "Size mismatch"); + } + + template + explicit constexpr Span(const Span &s, + std::enable_if_t::value && + N == Extent, + std::nullptr_t> = nullptr) noexcept + : data_(s.data()) + { + } + + constexpr Span(const Span &other) noexcept = default; + constexpr Span &operator=(const Span &other) noexcept = default; + + constexpr iterator begin() const { return data(); } + constexpr const_iterator cbegin() const { return begin(); } + constexpr 
iterator end() const { return data() + size(); } + constexpr const_iterator cend() const { return end(); } + constexpr reverse_iterator rbegin() const { return reverse_iterator(end()); } + constexpr const_reverse_iterator crbegin() const { return rbegin(); } + constexpr reverse_iterator rend() const { return reverse_iterator(begin()); } + constexpr const_reverse_iterator crend() const { return rend(); } + + constexpr reference front() const { return *data(); } + constexpr reference back() const { return *(data() + size() - 1); } + constexpr reference operator[](size_type idx) const { return data()[idx]; } + constexpr pointer data() const noexcept { return data_; } + + constexpr size_type size() const noexcept { return Extent; } + constexpr size_type size_bytes() const noexcept { return size() * sizeof(element_type); } + constexpr bool empty() const noexcept { return size() == 0; } + + template + constexpr Span first() const + { + static_assert(Count <= Extent, "Count larger than size"); + return Span{ data(), Count }; + } + + constexpr Span first(std::size_t Count) const + { + return Span{ data(), Count }; + } + + template + constexpr Span last() const + { + static_assert(Count <= Extent, "Count larger than size"); + return Span{ data() + size() - Count, Count }; + } + + constexpr Span last(std::size_t Count) const + { + return Span{ data() + size() - Count, Count }; + } + + template + constexpr Span subspan() const + { + static_assert(Offset <= Extent, "Offset larger than size"); + static_assert(Count == dynamic_extent || Count + Offset <= Extent, + "Offset + Count larger than size"); + return Span{ + data() + Offset, + Count == dynamic_extent ? size() - Offset : Count + }; + } + + constexpr Span + subspan(std::size_t Offset, std::size_t Count = dynamic_extent) const + { + return Span{ + data() + Offset, + Count == dynamic_extent ? 
size() - Offset : Count + }; + } + +private: + pointer data_; +}; + +template +class Span +{ +public: + using element_type = T; + using value_type = typename std::remove_cv_t; + using size_type = std::size_t; + using difference_type = std::ptrdiff_t; + using pointer = T *; + using const_pointer = const T *; + using reference = T &; + using const_reference = const T &; + using iterator = T *; + using const_iterator = const T *; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + + static constexpr std::size_t extent = dynamic_extent; + + constexpr Span() noexcept + : data_(nullptr), size_(0) + { + } + + constexpr Span(pointer ptr, size_type count) + : data_(ptr), size_(count) + { + } + + constexpr Span(pointer first, pointer last) + : data_(first), size_(last - first) + { + } + + template + constexpr Span(element_type (&arr)[N], + std::enable_if_t (*)[], + element_type (*)[]>::value, + std::nullptr_t> = nullptr) noexcept + : data_(arr), size_(N) + { + } + + template + constexpr Span(std::array &arr, + std::enable_if_t (*)[], + element_type (*)[]>::value, + std::nullptr_t> = nullptr) noexcept + : data_(utils::data(arr)), size_(N) + { + } + + template + constexpr Span(const std::array &arr) noexcept + : data_(utils::data(arr)), size_(N) + { + } + + template + constexpr Span(Container &cont, + std::enable_if_t::value && + !details::is_array::value && + !std::is_array::value && + std::is_convertible (*)[], + element_type (*)[]>::value, + std::nullptr_t> = nullptr) + : data_(utils::data(cont)), size_(utils::size(cont)) + { + } + + template + constexpr Span(const Container &cont, + std::enable_if_t::value && + !details::is_array::value && + !std::is_array::value && + std::is_convertible (*)[], + element_type (*)[]>::value, + std::nullptr_t> = nullptr) + : data_(utils::data(cont)), size_(utils::size(cont)) + { + } + + template + constexpr Span(const Span &s, + std::enable_if_t::value, + std::nullptr_t> = nullptr) noexcept + : data_(s.data()), size_(s.size()) + { + } + + constexpr Span(const Span &other) noexcept = default; + + constexpr Span &operator=(const Span &other) noexcept + { + data_ = other.data_; + size_ = other.size_; + return *this; + } + + constexpr iterator begin() const { return data(); } + constexpr const_iterator cbegin() const { return begin(); } + constexpr iterator end() const { return data() + size(); } + constexpr const_iterator cend() const { return end(); } + constexpr reverse_iterator rbegin() const { return reverse_iterator(end()); } + constexpr const_reverse_iterator crbegin() const { return rbegin(); } + constexpr reverse_iterator rend() const { return reverse_iterator(begin()); } + constexpr const_reverse_iterator crend() const { return rend(); } + + constexpr reference front() const { return *data(); } + constexpr reference back() const { return *(data() + size() - 1); } + constexpr reference operator[](size_type idx) const { return data()[idx]; } + constexpr pointer data() const noexcept { return data_; } + + constexpr size_type size() const noexcept { return size_; } + constexpr size_type size_bytes() const noexcept { return size() * sizeof(element_type); } + constexpr bool empty() const noexcept { return size() == 0; } + + template + constexpr Span first() const + { + return Span{ data(), Count }; + } + + constexpr Span first(std::size_t Count) const + { + return { data(), Count }; + } + + template + constexpr Span last() const + { + return Span{ data() + size() - Count, Count }; + } + + constexpr Span 
last(std::size_t Count) const + { + return Span{ data() + size() - Count, Count }; + } + + template + constexpr Span subspan() const + { + return Span{ + data() + Offset, + Count == dynamic_extent ? size() - Offset : Count + }; + } + + constexpr Span + subspan(std::size_t Offset, std::size_t Count = dynamic_extent) const + { + return Span{ + data() + Offset, + Count == dynamic_extent ? size() - Offset : Count + }; + } + +private: + pointer data_; + size_type size_; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/base/thread.h b/spider-cam/libcamera/include/libcamera/base/thread.h new file mode 100644 index 0000000..4f33de6 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/base/thread.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * Thread support + */ + +#pragma once + +#include +#include +#include + +#include + +#include +#include +#include + +namespace libcamera { + +class EventDispatcher; +class Message; +class Object; +class ThreadData; +class ThreadMain; + +class Thread +{ +public: + Thread(); + virtual ~Thread(); + + void start(); + void exit(int code = 0); + bool wait(utils::duration duration = utils::duration::max()); + + bool isRunning(); + + Signal<> finished; + + static Thread *current(); + static pid_t currentId(); + + EventDispatcher *eventDispatcher(); + + void dispatchMessages(Message::Type type = Message::Type::None); + +protected: + int exec(); + virtual void run(); + +private: + void startThread(); + void finishThread(); + + void postMessage(std::unique_ptr msg, Object *receiver); + void removeMessages(Object *receiver); + + friend class Object; + friend class ThreadData; + friend class ThreadMain; + + void moveObject(Object *object); + void moveObject(Object *object, ThreadData *currentData, + ThreadData *targetData); + + std::thread thread_; + ThreadData *data_; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/base/thread_annotations.h b/spider-cam/libcamera/include/libcamera/base/thread_annotations.h new file mode 100644 index 0000000..81930f0 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/base/thread_annotations.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2021, Google Inc. + * + * Macro of Clang thread safety analysis + */ + +#pragma once + +#include + +/* + * Enable thread safety attributes only with clang. + * The attributes can be safely erased when compiling with other compilers. + */ +#if defined(__clang__) && !defined(SWIG) +#define LIBCAMERA_TSA_ATTRIBUTE__(x) __attribute__((x)) +#else +#define LIBCAMERA_TSA_ATTRIBUTE__(x) /* no-op */ +#endif + +/* See https://clang.llvm.org/docs/ThreadSafetyAnalysis.html for these usages. */ + +#define LIBCAMERA_TSA_CAPABILITY(x) \ + LIBCAMERA_TSA_ATTRIBUTE__(capability(x)) + +#define LIBCAMERA_TSA_SCOPED_CAPABILITY \ + LIBCAMERA_TSA_ATTRIBUTE__(scoped_lockable) + +#define LIBCAMERA_TSA_GUARDED_BY(x) \ + LIBCAMERA_TSA_ATTRIBUTE__(guarded_by(x)) + +#define LIBCAMERA_TSA_PT_GUARDED_BY(x) \ + LIBCAMERA_TSA_ATTRIBUTE__(pt_guarded_by(x)) + +#define LIBCAMERA_TSA_ACQUIRED_BEFORE(...) \ + LIBCAMERA_TSA_ATTRIBUTE__(acquired_before(__VA_ARGS__)) + +#define LIBCAMERA_TSA_ACQUIRED_AFTER(...) \ + LIBCAMERA_TSA_ATTRIBUTE__(acquired_after(__VA_ARGS__)) + +#define LIBCAMERA_TSA_REQUIRES(...) \ + LIBCAMERA_TSA_ATTRIBUTE__(requires_capability(__VA_ARGS__)) + +#define LIBCAMERA_TSA_REQUIRES_SHARED(...) 
\ + LIBCAMERA_TSA_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__)) + +#define LIBCAMERA_TSA_ACQUIRE(...) \ + LIBCAMERA_TSA_ATTRIBUTE__(acquire_capability(__VA_ARGS__)) + +#define LIBCAMERA_TSA_ACQUIRE_SHARED(...) \ + LIBCAMERA_TSA_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__)) + +#define LIBCAMERA_TSA_RELEASE(...) \ + LIBCAMERA_TSA_ATTRIBUTE__(release_capability(__VA_ARGS__)) + +#define LIBCAMERA_TSA_RELEASE_SHARED(...) \ + LIBCAMERA_TSA_ATTRIBUTE__(release_shared_capability(__VA_ARGS__)) + +#define LIBCAMERA_TSA_RELEASE_GENERIC(...) \ + LIBCAMERA_TSA_ATTRIBUTE__(release_generic_capability(__VA_ARGS__)) + +#define LIBCAMERA_TSA_TRY_ACQUIRE(...) \ + LIBCAMERA_TSA_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__)) + +#define LIBCAMERA_TSA_TRY_ACQUIRE_SHARED(...) \ + LIBCAMERA_TSA_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__)) + +#define LIBCAMERA_TSA_EXCLUDES(...) \ + LIBCAMERA_TSA_ATTRIBUTE__(locks_excluded(__VA_ARGS__)) + +#define LIBCAMERA_TSA_ASSERT_CAPABILITY(x) \ + LIBCAMERA_TSA_ATTRIBUTE__(assert_capability(x)) + +#define LIBCAMERA_TSA_ASSERT_SHARED_CAPABILITY(x) \ + LIBCAMERA_TSA_ATTRIBUTE__(assert_shared_capability(x)) + +#define LIBCAMERA_TSA_RETURN_CAPABILITY(x) \ + LIBCAMERA_TSA_ATTRIBUTE__(lock_returned(x)) + +#define LIBCAMERA_TSA_NO_THREAD_SAFETY_ANALYSIS \ + LIBCAMERA_TSA_ATTRIBUTE__(no_thread_safety_analysis) diff --git a/spider-cam/libcamera/include/libcamera/base/timer.h b/spider-cam/libcamera/include/libcamera/base/timer.h new file mode 100644 index 0000000..5ef4595 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/base/timer.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * Generic timer + */ + +#pragma once + +#include +#include + +#include + +#include +#include + +namespace libcamera { + +class Message; + +class Timer : public Object +{ +public: + Timer(Object *parent = nullptr); + ~Timer(); + + void start(std::chrono::milliseconds duration); + void start(std::chrono::steady_clock::time_point deadline); + void stop(); + bool isRunning() const; + + std::chrono::steady_clock::time_point deadline() const { return deadline_; } + + Signal<> timeout; + +protected: + void message(Message *msg) override; + +private: + void registerTimer(); + void unregisterTimer(); + + bool running_; + std::chrono::steady_clock::time_point deadline_; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/base/unique_fd.h b/spider-cam/libcamera/include/libcamera/base/unique_fd.h new file mode 100644 index 0000000..c9a3b5d --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/base/unique_fd.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2021, Google Inc. + * + * File descriptor wrapper that owns a file descriptor. 
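+ *
+ * A short usage sketch (the path and buffer are illustrative, error
+ * handling is elided):
+ *
+ *   UniqueFD fd(open("/tmp/example", O_RDONLY));
+ *   if (fd.isValid())
+ *       read(fd.get(), buffer, sizeof(buffer));
+ *   // the descriptor is closed when fd goes out of scope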
+ */ + +#pragma once + +#include + +#include +#include + +namespace libcamera { + +class UniqueFD final +{ +public: + UniqueFD() + : fd_(-1) + { + } + + explicit UniqueFD(int fd) + : fd_(fd) + { + } + + UniqueFD(UniqueFD &&other) + : fd_(other.release()) + { + } + + ~UniqueFD() + { + reset(); + } + + UniqueFD &operator=(UniqueFD &&other) + { + reset(other.release()); + return *this; + } + + __nodiscard int release() + { + int fd = fd_; + fd_ = -1; + return fd; + } + + void reset(int fd = -1); + + void swap(UniqueFD &other) + { + std::swap(fd_, other.fd_); + } + + int get() const { return fd_; } + bool isValid() const { return fd_ >= 0; } + +private: + LIBCAMERA_DISABLE_COPY(UniqueFD) + + int fd_; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/base/utils.h b/spider-cam/libcamera/include/libcamera/base/utils.h new file mode 100644 index 0000000..734ff81 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/base/utils.h @@ -0,0 +1,410 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2018, Google Inc. + * + * Miscellaneous utility functions + */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#ifndef __DOXYGEN__ + +/* uClibc and uClibc-ng don't provide O_TMPFILE */ +#ifndef O_TMPFILE +#define O_TMPFILE (020000000 | O_DIRECTORY) +#endif + +#endif + +namespace libcamera { + +namespace utils { + +const char *basename(const char *path); + +char *secure_getenv(const char *name); +std::string dirname(const std::string &path); + +template +std::vector map_keys(const T &map) +{ + std::vector keys; + std::transform(map.begin(), map.end(), std::back_inserter(keys), + [](const auto &value) { return value.first; }); + return keys; +} + +template +unsigned int set_overlap(InputIt1 first1, InputIt1 last1, + InputIt2 first2, InputIt2 last2) +{ + unsigned int count = 0; + + while (first1 != last1 && first2 != last2) { + if (*first1 < *first2) { + ++first1; + } else { + if (!(*first2 < *first1)) + count++; + ++first2; + } + } + + return count; +} + +using clock = std::chrono::steady_clock; +using duration = std::chrono::steady_clock::duration; +using time_point = std::chrono::steady_clock::time_point; + +struct timespec duration_to_timespec(const duration &value); +std::string time_point_to_string(const time_point &time); + +#ifndef __DOXYGEN__ +struct _hex { + uint64_t v; + unsigned int w; +}; + +std::basic_ostream> & +operator<<(std::basic_ostream> &stream, const _hex &h); +#endif + +template::value> * = nullptr> +_hex hex(T value, unsigned int width = 0); + +#ifndef __DOXYGEN__ +template<> +inline _hex hex(int8_t value, unsigned int width) +{ + return { static_cast(value), width ? width : 2 }; +} + +template<> +inline _hex hex(uint8_t value, unsigned int width) +{ + return { static_cast(value), width ? width : 2 }; +} + +template<> +inline _hex hex(int16_t value, unsigned int width) +{ + return { static_cast(value), width ? width : 4 }; +} + +template<> +inline _hex hex(uint16_t value, unsigned int width) +{ + return { static_cast(value), width ? width : 4 }; +} + +template<> +inline _hex hex(int32_t value, unsigned int width) +{ + return { static_cast(value), width ? width : 8 }; +} + +template<> +inline _hex hex(uint32_t value, unsigned int width) +{ + return { static_cast(value), width ? width : 8 }; +} + +template<> +inline _hex hex(int64_t value, unsigned int width) +{ + return { static_cast(value), width ? 
width : 16 }; +} + +template<> +inline _hex hex(uint64_t value, unsigned int width) +{ + return { static_cast(value), width ? width : 16 }; +} +#endif + +size_t strlcpy(char *dst, const char *src, size_t size); + +#ifndef __DOXYGEN__ +template +std::string join(const Container &items, const std::string &sep, UnaryOp op) +{ + std::ostringstream ss; + bool first = true; + + for (typename Container::const_iterator it = std::begin(items); + it != std::end(items); ++it) { + if (!first) + ss << sep; + else + first = false; + + ss << op(*it); + } + + return ss.str(); +} + +template +std::string join(const Container &items, const std::string &sep) +{ + std::ostringstream ss; + bool first = true; + + for (typename Container::const_iterator it = std::begin(items); + it != std::end(items); ++it) { + if (!first) + ss << sep; + else + first = false; + + ss << *it; + } + + return ss.str(); +} +#else +template +std::string join(const Container &items, const std::string &sep, UnaryOp op = nullptr); +#endif + +namespace details { + +class StringSplitter +{ +public: + StringSplitter(const std::string &str, const std::string &delim); + + class iterator + { + public: + using difference_type = std::size_t; + using value_type = std::string; + using pointer = value_type *; + using reference = value_type &; + using iterator_category = std::input_iterator_tag; + + iterator(const StringSplitter *ss, std::string::size_type pos); + + iterator &operator++(); + std::string operator*() const; + bool operator!=(const iterator &other) const; + + private: + const StringSplitter *ss_; + std::string::size_type pos_; + std::string::size_type next_; + }; + + iterator begin() const; + iterator end() const; + +private: + std::string str_; + std::string delim_; +}; + +} /* namespace details */ + +details::StringSplitter split(const std::string &str, const std::string &delim); + +std::string toAscii(const std::string &str); + +std::string libcameraBuildPath(); +std::string libcameraSourcePath(); + +constexpr unsigned int alignDown(unsigned int value, unsigned int alignment) +{ + return value / alignment * alignment; +} + +constexpr unsigned int alignUp(unsigned int value, unsigned int alignment) +{ + return (value + alignment - 1) / alignment * alignment; +} + +namespace details { + +template +struct reverse_adapter { + T &iterable; +}; + +template +auto begin(reverse_adapter r) +{ + return std::rbegin(r.iterable); +} + +template +auto end(reverse_adapter r) +{ + return std::rend(r.iterable); +} + +} /* namespace details */ + +template +details::reverse_adapter reverse(T &&iterable) +{ + return { iterable }; +} + +namespace details { + +template +class enumerate_iterator +{ +private: + using base_reference = typename std::iterator_traits::reference; + +public: + using difference_type = typename std::iterator_traits::difference_type; + using value_type = std::pair; + using pointer = value_type *; + using reference = value_type &; + using iterator_category = std::input_iterator_tag; + + explicit enumerate_iterator(Base iter) + : current_(iter), pos_(0) + { + } + + enumerate_iterator &operator++() + { + ++current_; + ++pos_; + return *this; + } + + bool operator!=(const enumerate_iterator &other) const + { + return current_ != other.current_; + } + + value_type operator*() const + { + return { pos_, *current_ }; + } + +private: + Base current_; + std::size_t pos_; +}; + +template +class enumerate_adapter +{ +public: + using iterator = enumerate_iterator; + + enumerate_adapter(Base begin, Base end) + : begin_(begin), end_(end) + { + } 
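+
+ /*
+ * This adapter is normally created through utils::enumerate() below;
+ * a typical use is (a sketch):
+ *
+ *   std::vector<int> values{ 10, 20, 30 };
+ *   for (auto [index, value] : utils::enumerate(values))
+ *       std::cout << index << ": " << value << std::endl;
+ */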
+ + iterator begin() const + { + return iterator{ begin_ }; + } + + iterator end() const + { + return iterator{ end_ }; + } + +private: + const Base begin_; + const Base end_; +}; + +} /* namespace details */ + +template +auto enumerate(T &iterable) -> details::enumerate_adapter +{ + return { std::begin(iterable), std::end(iterable) }; +} + +#ifndef __DOXYGEN__ +template +auto enumerate(T (&iterable)[N]) -> details::enumerate_adapter +{ + return { std::begin(iterable), std::end(iterable) }; +} +#endif + +class Duration : public std::chrono::duration +{ + using BaseDuration = std::chrono::duration; + +public: + Duration() = default; + + template + constexpr explicit Duration(const Rep &r) + : BaseDuration(r) + { + } + + template + constexpr Duration(const std::chrono::duration &d) + : BaseDuration(d) + { + } + + template + double get() const + { + auto const c = std::chrono::duration_cast>(*this); + return c.count(); + } + + explicit constexpr operator bool() const + { + return *this != BaseDuration::zero(); + } +}; + +template +decltype(auto) abs_diff(const T &a, const T &b) +{ + if (a < b) + return b - a; + else + return a - b; +} + +double strtod(const char *__restrict nptr, char **__restrict endptr); + +template +constexpr std::underlying_type_t to_underlying(Enum e) noexcept +{ + return static_cast>(e); +} + +} /* namespace utils */ + +#ifndef __DOXYGEN__ +template +std::basic_ostream &operator<<(std::basic_ostream &os, + const utils::Duration &d); +#endif + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/camera.h b/spider-cam/libcamera/include/libcamera/camera.h new file mode 100644 index 0000000..94cee7b --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/camera.h @@ -0,0 +1,170 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2018, Google Inc. 
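+ *
+ * A condensed capture sketch using the interfaces declared below (error
+ * handling is elided; manager, id and the StreamRole value are
+ * illustrative):
+ *
+ *   std::shared_ptr<Camera> camera = manager->get(id);
+ *   camera->acquire();
+ *   auto config = camera->generateConfiguration({ StreamRole::Viewfinder });
+ *   camera->configure(config.get());
+ *   std::unique_ptr<Request> request = camera->createRequest();
+ *   // a FrameBuffer is attached to the request here in a real application
+ *   camera->start();
+ *   camera->queueRequest(request.get());
+ *   ...
+ *   camera->stop();
+ *   camera->release();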
+ * + * Camera object interface + */ + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace libcamera { + +class FrameBuffer; +class FrameBufferAllocator; +class PipelineHandler; +class Request; + +class SensorConfiguration +{ +public: + unsigned int bitDepth = 0; + + Rectangle analogCrop; + + struct { + unsigned int binX = 1; + unsigned int binY = 1; + } binning; + + struct { + unsigned int xOddInc = 1; + unsigned int xEvenInc = 1; + unsigned int yOddInc = 1; + unsigned int yEvenInc = 1; + } skipping; + + Size outputSize; + + bool isValid() const; +}; + +class CameraConfiguration +{ +public: + enum Status { + Valid, + Adjusted, + Invalid, + }; + + using iterator = std::vector::iterator; + using const_iterator = std::vector::const_iterator; + + virtual ~CameraConfiguration(); + + void addConfiguration(const StreamConfiguration &cfg); + virtual Status validate() = 0; + + StreamConfiguration &at(unsigned int index); + const StreamConfiguration &at(unsigned int index) const; + StreamConfiguration &operator[](unsigned int index) + { + return at(index); + } + const StreamConfiguration &operator[](unsigned int index) const + { + return at(index); + } + + iterator begin(); + const_iterator begin() const; + iterator end(); + const_iterator end() const; + + bool empty() const; + std::size_t size() const; + + std::optional sensorConfig; + Orientation orientation; + +protected: + CameraConfiguration(); + + enum class ColorSpaceFlag { + None, + StreamsShareColorSpace, + }; + + using ColorSpaceFlags = Flags; + + Status validateColorSpaces(ColorSpaceFlags flags = ColorSpaceFlag::None); + + std::vector config_; +}; + +class Camera final : public Object, public std::enable_shared_from_this, + public Extensible +{ + LIBCAMERA_DECLARE_PRIVATE() + +public: + static std::shared_ptr create(std::unique_ptr d, + const std::string &id, + const std::set &streams); + + const std::string &id() const; + + Signal bufferCompleted; + Signal requestCompleted; + Signal<> disconnected; + + int acquire(); + int release(); + + const ControlInfoMap &controls() const; + const ControlList &properties() const; + + const std::set &streams() const; + + std::unique_ptr + generateConfiguration(Span roles = {}); + + std::unique_ptr + generateConfiguration(std::initializer_list roles) + { + return generateConfiguration(Span(roles.begin(), roles.end())); + } + + int configure(CameraConfiguration *config); + + std::unique_ptr createRequest(uint64_t cookie = 0); + int queueRequest(Request *request); + + int start(const ControlList *controls = nullptr); + int stop(); + +private: + LIBCAMERA_DISABLE_COPY(Camera) + + Camera(std::unique_ptr d, const std::string &id, + const std::set &streams); + ~Camera(); + + friend class PipelineHandler; + void disconnect(); + void requestComplete(Request *request); + + friend class FrameBufferAllocator; + int exportFrameBuffers(Stream *stream, + std::vector> *buffers); +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/camera_manager.h b/spider-cam/libcamera/include/libcamera/camera_manager.h new file mode 100644 index 0000000..b50df78 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/camera_manager.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2018, Google Inc. 
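+ *
+ * Typical startup with the CameraManager declared below, as a sketch:
+ *
+ *   CameraManager manager;
+ *   manager.start();
+ *   for (const std::shared_ptr<Camera> &camera : manager.cameras())
+ *       std::cout << camera->id() << std::endl;
+ *   manager.stop();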
+ * + * Camera management + */ + +#pragma once + +#include +#include +#include +#include + +#include +#include +#include + +namespace libcamera { + +class Camera; + +class CameraManager : public Object, public Extensible +{ + LIBCAMERA_DECLARE_PRIVATE() +public: + CameraManager(); + ~CameraManager(); + + int start(); + void stop(); + + std::vector> cameras() const; + std::shared_ptr get(const std::string &id); + + static const std::string &version() { return version_; } + + Signal> cameraAdded; + Signal> cameraRemoved; + +private: + LIBCAMERA_DISABLE_COPY(CameraManager) + + static const std::string version_; + static CameraManager *self_; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/color_space.h b/spider-cam/libcamera/include/libcamera/color_space.h new file mode 100644 index 0000000..7b483cd --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/color_space.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2021, Raspberry Pi Ltd + * + * color space definitions + */ + +#pragma once + +#include +#include + +namespace libcamera { + +class PixelFormat; + +class ColorSpace +{ +public: + enum class Primaries { + Raw, + Smpte170m, + Rec709, + Rec2020, + }; + + enum class TransferFunction { + Linear, + Srgb, + Rec709, + }; + + enum class YcbcrEncoding { + None, + Rec601, + Rec709, + Rec2020, + }; + + enum class Range { + Full, + Limited, + }; + + constexpr ColorSpace(Primaries p, TransferFunction t, YcbcrEncoding e, Range r) + : primaries(p), transferFunction(t), ycbcrEncoding(e), range(r) + { + } + + static const ColorSpace Raw; + static const ColorSpace Srgb; + static const ColorSpace Sycc; + static const ColorSpace Smpte170m; + static const ColorSpace Rec709; + static const ColorSpace Rec2020; + + Primaries primaries; + TransferFunction transferFunction; + YcbcrEncoding ycbcrEncoding; + Range range; + + std::string toString() const; + static std::string toString(const std::optional &colorSpace); + + static std::optional fromString(const std::string &str); + + bool adjust(PixelFormat format); +}; + +bool operator==(const ColorSpace &lhs, const ColorSpace &rhs); +static inline bool operator!=(const ColorSpace &lhs, const ColorSpace &rhs) +{ + return !(lhs == rhs); +} + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/control_ids.h.in b/spider-cam/libcamera/include/libcamera/control_ids.h.in new file mode 100644 index 0000000..293ba96 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/control_ids.h.in @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * Control ID list + * + * This file is auto-generated. Do not edit. + */ + +#pragma once + +#include +#include +#include +#include + +#include + +namespace libcamera { + +namespace controls { + +enum { +${ids} +}; + +${controls} + +extern const ControlIdMap controls; + +${vendor_controls} + +} /* namespace controls */ + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/controls.h b/spider-cam/libcamera/include/libcamera/controls.h new file mode 100644 index 0000000..7c2bb28 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/controls.h @@ -0,0 +1,428 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. 
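+ *
+ * Editorial note (sketch): controls are read and written through typed
+ * Control<T> identifiers; Brightness is illustrative here, taken from
+ * control_ids.h:
+ *
+ *   ControlList list(camera->controls());
+ *   list.set(controls::Brightness, 0.5f);
+ *   std::optional<float> brightness = list.get(controls::Brightness);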
+ * + * Control handling + */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +namespace libcamera { + +class ControlValidator; + +enum ControlType { + ControlTypeNone, + ControlTypeBool, + ControlTypeByte, + ControlTypeInteger32, + ControlTypeInteger64, + ControlTypeFloat, + ControlTypeString, + ControlTypeRectangle, + ControlTypeSize, +}; + +namespace details { + +template +struct control_type { +}; + +template<> +struct control_type { + static constexpr ControlType value = ControlTypeNone; +}; + +template<> +struct control_type { + static constexpr ControlType value = ControlTypeBool; +}; + +template<> +struct control_type { + static constexpr ControlType value = ControlTypeByte; +}; + +template<> +struct control_type { + static constexpr ControlType value = ControlTypeInteger32; +}; + +template<> +struct control_type { + static constexpr ControlType value = ControlTypeInteger64; +}; + +template<> +struct control_type { + static constexpr ControlType value = ControlTypeFloat; +}; + +template<> +struct control_type { + static constexpr ControlType value = ControlTypeString; +}; + +template<> +struct control_type { + static constexpr ControlType value = ControlTypeRectangle; +}; + +template<> +struct control_type { + static constexpr ControlType value = ControlTypeSize; +}; + +template +struct control_type> : public control_type> { +}; + +} /* namespace details */ + +class ControlValue +{ +public: + ControlValue(); + +#ifndef __DOXYGEN__ + template::value && + details::control_type::value && + !std::is_same>::value, + std::nullptr_t> = nullptr> + ControlValue(const T &value) + : type_(ControlTypeNone), numElements_(0) + { + set(details::control_type>::value, false, + &value, 1, sizeof(T)); + } + + template::value || + std::is_same>::value, + std::nullptr_t> = nullptr> +#else + template +#endif + ControlValue(const T &value) + : type_(ControlTypeNone), numElements_(0) + { + set(details::control_type>::value, true, + value.data(), value.size(), sizeof(typename T::value_type)); + } + + ~ControlValue(); + + ControlValue(const ControlValue &other); + ControlValue &operator=(const ControlValue &other); + + ControlType type() const { return type_; } + bool isNone() const { return type_ == ControlTypeNone; } + bool isArray() const { return isArray_; } + std::size_t numElements() const { return numElements_; } + Span data() const; + Span data(); + + std::string toString() const; + + bool operator==(const ControlValue &other) const; + bool operator!=(const ControlValue &other) const + { + return !(*this == other); + } + +#ifndef __DOXYGEN__ + template::value && + !std::is_same>::value, + std::nullptr_t> = nullptr> + T get() const + { + assert(type_ == details::control_type>::value); + assert(!isArray_); + + return *reinterpret_cast(data().data()); + } + + template::value || + std::is_same>::value, + std::nullptr_t> = nullptr> +#else + template +#endif + T get() const + { + assert(type_ == details::control_type>::value); + assert(isArray_); + + using V = typename T::value_type; + const V *value = reinterpret_cast(data().data()); + return T{ value, numElements_ }; + } + +#ifndef __DOXYGEN__ + template::value && + !std::is_same>::value, + std::nullptr_t> = nullptr> + void set(const T &value) + { + set(details::control_type>::value, false, + reinterpret_cast(&value), 1, sizeof(T)); + } + + template::value || + std::is_same>::value, + std::nullptr_t> = nullptr> +#else + template +#endif + void set(const T &value) + { + 
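+		/*
+		 * Editorial note: the type-erased set() below copies the
+		 * element data into the ControlValue's own storage, so the
+		 * caller's container or span need not outlive this object.
+		 */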
set(details::control_type>::value, true, + value.data(), value.size(), sizeof(typename T::value_type)); + } + + void reserve(ControlType type, bool isArray = false, + std::size_t numElements = 1); + +private: + ControlType type_ : 8; + bool isArray_; + std::size_t numElements_ : 32; + union { + uint64_t value_; + void *storage_; + }; + + void release(); + void set(ControlType type, bool isArray, const void *data, + std::size_t numElements, std::size_t elementSize); +}; + +class ControlId +{ +public: + ControlId(unsigned int id, const std::string &name, ControlType type) + : id_(id), name_(name), type_(type) + { + } + + unsigned int id() const { return id_; } + const std::string &name() const { return name_; } + ControlType type() const { return type_; } + +private: + LIBCAMERA_DISABLE_COPY_AND_MOVE(ControlId) + + unsigned int id_; + std::string name_; + ControlType type_; +}; + +static inline bool operator==(unsigned int lhs, const ControlId &rhs) +{ + return lhs == rhs.id(); +} + +static inline bool operator!=(unsigned int lhs, const ControlId &rhs) +{ + return !(lhs == rhs); +} + +static inline bool operator==(const ControlId &lhs, unsigned int rhs) +{ + return lhs.id() == rhs; +} + +static inline bool operator!=(const ControlId &lhs, unsigned int rhs) +{ + return !(lhs == rhs); +} + +template +class Control : public ControlId +{ +public: + using type = T; + + Control(unsigned int id, const char *name) + : ControlId(id, name, details::control_type>::value) + { + } + +private: + LIBCAMERA_DISABLE_COPY_AND_MOVE(Control) +}; + +class ControlInfo +{ +public: + explicit ControlInfo(const ControlValue &min = {}, + const ControlValue &max = {}, + const ControlValue &def = {}); + explicit ControlInfo(Span values, + const ControlValue &def = {}); + explicit ControlInfo(std::set values, bool def); + explicit ControlInfo(bool value); + + const ControlValue &min() const { return min_; } + const ControlValue &max() const { return max_; } + const ControlValue &def() const { return def_; } + const std::vector &values() const { return values_; } + + std::string toString() const; + + bool operator==(const ControlInfo &other) const + { + return min_ == other.min_ && max_ == other.max_; + } + + bool operator!=(const ControlInfo &other) const + { + return !(*this == other); + } + +private: + ControlValue min_; + ControlValue max_; + ControlValue def_; + std::vector values_; +}; + +using ControlIdMap = std::unordered_map; + +class ControlInfoMap : private std::unordered_map +{ +public: + using Map = std::unordered_map; + + ControlInfoMap() = default; + ControlInfoMap(const ControlInfoMap &other) = default; + ControlInfoMap(std::initializer_list init, + const ControlIdMap &idmap); + ControlInfoMap(Map &&info, const ControlIdMap &idmap); + + ControlInfoMap &operator=(const ControlInfoMap &other) = default; + + using Map::key_type; + using Map::mapped_type; + using Map::value_type; + using Map::size_type; + using Map::iterator; + using Map::const_iterator; + + using Map::begin; + using Map::cbegin; + using Map::end; + using Map::cend; + using Map::at; + using Map::empty; + using Map::size; + using Map::count; + using Map::find; + + mapped_type &at(unsigned int key); + const mapped_type &at(unsigned int key) const; + size_type count(unsigned int key) const; + iterator find(unsigned int key); + const_iterator find(unsigned int key) const; + + const ControlIdMap &idmap() const { return *idmap_; } + +private: + bool validate(); + + const ControlIdMap *idmap_ = nullptr; +}; + +class ControlList +{ +private: + using 
ControlListMap = std::unordered_map; + +public: + enum class MergePolicy { + KeepExisting = 0, + OverwriteExisting, + }; + + ControlList(); + ControlList(const ControlIdMap &idmap, const ControlValidator *validator = nullptr); + ControlList(const ControlInfoMap &infoMap, const ControlValidator *validator = nullptr); + + using iterator = ControlListMap::iterator; + using const_iterator = ControlListMap::const_iterator; + + iterator begin() { return controls_.begin(); } + iterator end() { return controls_.end(); } + const_iterator begin() const { return controls_.begin(); } + const_iterator end() const { return controls_.end(); } + + bool empty() const { return controls_.empty(); } + std::size_t size() const { return controls_.size(); } + + void clear() { controls_.clear(); } + void merge(const ControlList &source, MergePolicy policy = MergePolicy::KeepExisting); + + bool contains(unsigned int id) const; + + template + std::optional get(const Control &ctrl) const + { + const auto entry = controls_.find(ctrl.id()); + if (entry == controls_.end()) + return std::nullopt; + + const ControlValue &val = entry->second; + return val.get(); + } + + template + void set(const Control &ctrl, const V &value) + { + ControlValue *val = find(ctrl.id()); + if (!val) + return; + + val->set(value); + } + + template + void set(const Control> &ctrl, const std::initializer_list &value) + { + ControlValue *val = find(ctrl.id()); + if (!val) + return; + + val->set(Span, Size>{ value.begin(), value.size() }); + } + + const ControlValue &get(unsigned int id) const; + void set(unsigned int id, const ControlValue &value); + + const ControlInfoMap *infoMap() const { return infoMap_; } + const ControlIdMap *idMap() const { return idmap_; } + +private: + const ControlValue *find(unsigned int id) const; + ControlValue *find(unsigned int id); + + const ControlValidator *validator_; + const ControlIdMap *idmap_; + const ControlInfoMap *infoMap_; + + ControlListMap controls_; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/fence.h b/spider-cam/libcamera/include/libcamera/fence.h new file mode 100644 index 0000000..598336c --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/fence.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2021, Google Inc. + * + * Synchronization fence + */ + +#pragma once + +#include +#include + +namespace libcamera { + +class Fence +{ +public: + Fence(UniqueFD fd); + + bool isValid() const { return fd_.isValid(); } + const UniqueFD &fd() const { return fd_; } + + UniqueFD release() { return std::move(fd_); } + +private: + LIBCAMERA_DISABLE_COPY_AND_MOVE(Fence) + + UniqueFD fd_; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/formats.h.in b/spider-cam/libcamera/include/libcamera/formats.h.in new file mode 100644 index 0000000..6ae7634 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/formats.h.in @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Google Inc. + * + * Formats + * + * This file is auto-generated. Do not edit. 
+ */ + +#pragma once + +#include + +#include + +namespace libcamera { + +namespace formats { + +namespace { + +constexpr uint32_t __fourcc(char a, char b, char c, char d) +{ + return (static_cast(a) << 0) | + (static_cast(b) << 8) | + (static_cast(c) << 16) | + (static_cast(d) << 24); +} + +constexpr uint64_t __mod(unsigned int vendor, unsigned int mod) +{ + return (static_cast(vendor) << 56) | + (static_cast(mod) << 0); +} + +} /* namespace */ + +${formats} + +} /* namespace formats */ + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/framebuffer.h b/spider-cam/libcamera/include/libcamera/framebuffer.h new file mode 100644 index 0000000..5ae2270 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/framebuffer.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * Frame buffer handling + */ + +#pragma once + +#include +#include +#include +#include +#include + +#include +#include +#include + +namespace libcamera { + +class Fence; +class Request; + +struct FrameMetadata { + enum Status { + FrameSuccess, + FrameError, + FrameCancelled, + }; + + struct Plane { + unsigned int bytesused; + }; + + Status status; + unsigned int sequence; + uint64_t timestamp; + + Span planes() { return planes_; } + Span planes() const { return planes_; } + +private: + friend class FrameBuffer; + + std::vector planes_; +}; + +class FrameBuffer : public Extensible +{ + LIBCAMERA_DECLARE_PRIVATE() + +public: + struct Plane { + static constexpr unsigned int kInvalidOffset = std::numeric_limits::max(); + SharedFD fd; + unsigned int offset = kInvalidOffset; + unsigned int length; + }; + + FrameBuffer(const std::vector &planes, unsigned int cookie = 0); + FrameBuffer(std::unique_ptr d); + virtual ~FrameBuffer() {} + + const std::vector &planes() const; + Request *request() const; + const FrameMetadata &metadata() const; + + uint64_t cookie() const; + void setCookie(uint64_t cookie); + + std::unique_ptr releaseFence(); + +private: + LIBCAMERA_DISABLE_COPY_AND_MOVE(FrameBuffer) +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/framebuffer_allocator.h b/spider-cam/libcamera/include/libcamera/framebuffer_allocator.h new file mode 100644 index 0000000..f3896bf --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/framebuffer_allocator.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * FrameBuffer allocator + */ + +#pragma once + +#include +#include +#include + +#include + +namespace libcamera { + +class Camera; +class FrameBuffer; +class Stream; + +class FrameBufferAllocator +{ +public: + FrameBufferAllocator(std::shared_ptr camera); + ~FrameBufferAllocator(); + + int allocate(Stream *stream); + int free(Stream *stream); + + bool allocated() const { return !buffers_.empty(); } + const std::vector> &buffers(Stream *stream) const; + +private: + LIBCAMERA_DISABLE_COPY(FrameBufferAllocator) + + std::shared_ptr camera_; + std::map>> buffers_; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/geometry.h b/spider-cam/libcamera/include/libcamera/geometry.h new file mode 100644 index 0000000..3e6f0f5 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/geometry.h @@ -0,0 +1,303 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. 
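+ *
+ * Editorial note (sketch): the Size helpers declared below are plain value
+ * arithmetic, for example:
+ *
+ *   Size raw(1000, 601);
+ *   Size aligned = raw.alignedUpTo(64, 2);        -> 1024x602
+ *   Size bounded = raw.boundedTo(Size(640, 480)); -> 640x480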
+ * + * Geometry-related classes + */ + +#pragma once + +#include +#include +#include + +#include + +namespace libcamera { + +class Rectangle; + +class Point +{ +public: + constexpr Point() + : x(0), y(0) + { + } + + constexpr Point(int xpos, int ypos) + : x(xpos), y(ypos) + { + } + + int x; + int y; + + const std::string toString() const; + + constexpr Point operator-() const + { + return { -x, -y }; + } +}; + +bool operator==(const Point &lhs, const Point &rhs); +static inline bool operator!=(const Point &lhs, const Point &rhs) +{ + return !(lhs == rhs); +} + +std::ostream &operator<<(std::ostream &out, const Point &p); + +class Size +{ +public: + constexpr Size() + : Size(0, 0) + { + } + + constexpr Size(unsigned int w, unsigned int h) + : width(w), height(h) + { + } + + unsigned int width; + unsigned int height; + + bool isNull() const { return !width && !height; } + const std::string toString() const; + + Size &alignDownTo(unsigned int hAlignment, unsigned int vAlignment) + { + width = width / hAlignment * hAlignment; + height = height / vAlignment * vAlignment; + return *this; + } + + Size &alignUpTo(unsigned int hAlignment, unsigned int vAlignment) + { + width = (width + hAlignment - 1) / hAlignment * hAlignment; + height = (height + vAlignment - 1) / vAlignment * vAlignment; + return *this; + } + + Size &boundTo(const Size &bound) + { + width = std::min(width, bound.width); + height = std::min(height, bound.height); + return *this; + } + + Size &expandTo(const Size &expand) + { + width = std::max(width, expand.width); + height = std::max(height, expand.height); + return *this; + } + + Size &growBy(const Size &margins) + { + width += margins.width; + height += margins.height; + return *this; + } + + Size &shrinkBy(const Size &margins) + { + width = width > margins.width ? width - margins.width : 0; + height = height > margins.height ? height - margins.height : 0; + return *this; + } + + __nodiscard constexpr Size alignedDownTo(unsigned int hAlignment, + unsigned int vAlignment) const + { + return { + width / hAlignment * hAlignment, + height / vAlignment * vAlignment + }; + } + + __nodiscard constexpr Size alignedUpTo(unsigned int hAlignment, + unsigned int vAlignment) const + { + return { + (width + hAlignment - 1) / hAlignment * hAlignment, + (height + vAlignment - 1) / vAlignment * vAlignment + }; + } + + __nodiscard constexpr Size boundedTo(const Size &bound) const + { + return { + std::min(width, bound.width), + std::min(height, bound.height) + }; + } + + __nodiscard constexpr Size expandedTo(const Size &expand) const + { + return { + std::max(width, expand.width), + std::max(height, expand.height) + }; + } + + __nodiscard constexpr Size grownBy(const Size &margins) const + { + return { + width + margins.width, + height + margins.height + }; + } + + __nodiscard constexpr Size shrunkBy(const Size &margins) const + { + return { + width > margins.width ? width - margins.width : 0, + height > margins.height ? 
height - margins.height : 0 + }; + } + + __nodiscard Size boundedToAspectRatio(const Size &ratio) const; + __nodiscard Size expandedToAspectRatio(const Size &ratio) const; + + __nodiscard Rectangle centeredTo(const Point ¢er) const; + + Size operator*(float factor) const; + Size operator/(float factor) const; + + Size &operator*=(float factor); + Size &operator/=(float factor); +}; + +bool operator==(const Size &lhs, const Size &rhs); +bool operator<(const Size &lhs, const Size &rhs); + +static inline bool operator!=(const Size &lhs, const Size &rhs) +{ + return !(lhs == rhs); +} + +static inline bool operator<=(const Size &lhs, const Size &rhs) +{ + return lhs < rhs || lhs == rhs; +} + +static inline bool operator>(const Size &lhs, const Size &rhs) +{ + return !(lhs <= rhs); +} + +static inline bool operator>=(const Size &lhs, const Size &rhs) +{ + return !(lhs < rhs); +} + +std::ostream &operator<<(std::ostream &out, const Size &s); + +class SizeRange +{ +public: + SizeRange() + : hStep(0), vStep(0) + { + } + + SizeRange(const Size &size) + : min(size), max(size), hStep(1), vStep(1) + { + } + + SizeRange(const Size &minSize, const Size &maxSize) + : min(minSize), max(maxSize), hStep(1), vStep(1) + { + } + + SizeRange(const Size &minSize, const Size &maxSize, + unsigned int hstep, unsigned int vstep) + : min(minSize), max(maxSize), hStep(hstep), vStep(vstep) + { + } + + bool contains(const Size &size) const; + + std::string toString() const; + + Size min; + Size max; + unsigned int hStep; + unsigned int vStep; +}; + +bool operator==(const SizeRange &lhs, const SizeRange &rhs); +static inline bool operator!=(const SizeRange &lhs, const SizeRange &rhs) +{ + return !(lhs == rhs); +} + +std::ostream &operator<<(std::ostream &out, const SizeRange &sr); + +class Rectangle +{ +public: + constexpr Rectangle() + : Rectangle(0, 0, 0, 0) + { + } + + constexpr Rectangle(int xpos, int ypos, const Size &size) + : x(xpos), y(ypos), width(size.width), height(size.height) + { + } + + constexpr Rectangle(int xpos, int ypos, unsigned int w, unsigned int h) + : x(xpos), y(ypos), width(w), height(h) + { + } + + constexpr explicit Rectangle(const Size &size) + : x(0), y(0), width(size.width), height(size.height) + { + } + + int x; + int y; + unsigned int width; + unsigned int height; + + bool isNull() const { return !width && !height; } + const std::string toString() const; + + Point center() const; + + Size size() const + { + return { width, height }; + } + + Point topLeft() const + { + return { x, y }; + } + + Rectangle &scaleBy(const Size &numerator, const Size &denominator); + Rectangle &translateBy(const Point &point); + + __nodiscard Rectangle boundedTo(const Rectangle &bound) const; + __nodiscard Rectangle enclosedIn(const Rectangle &boundary) const; + __nodiscard Rectangle scaledBy(const Size &numerator, + const Size &denominator) const; + __nodiscard Rectangle translatedBy(const Point &point) const; +}; + +bool operator==(const Rectangle &lhs, const Rectangle &rhs); +static inline bool operator!=(const Rectangle &lhs, const Rectangle &rhs) +{ + return !(lhs == rhs); +} + +std::ostream &operator<<(std::ostream &out, const Rectangle &r); + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/bayer_format.h b/spider-cam/libcamera/include/libcamera/internal/bayer_format.h new file mode 100644 index 0000000..5c14bb5 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/bayer_format.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + 
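+ * Editorial note (sketch): describing a raw sensor format and converting
+ * it, assuming a 10-bit CSI-2 packed RGGB layout:
+ *
+ *   BayerFormat bayer(BayerFormat::Order::RGGB, 10,
+ *                     BayerFormat::Packing::CSI2);
+ *   PixelFormat pix = bayer.toPixelFormat();
+ *   V4L2PixelFormat v4l2 = bayer.toV4L2PixelFormat();
+ *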
* Copyright (C) 2020, Raspberry Pi Ltd + * + * Bayer Pixel Format + */ + +#pragma once + +#include +#include +#include + +#include + +#include "libcamera/internal/v4l2_pixelformat.h" + +namespace libcamera { + +enum class Transform; + +class BayerFormat +{ +public: + enum Order : uint8_t { + BGGR = 0, + GBRG = 1, + GRBG = 2, + RGGB = 3, + MONO = 4 + }; + + enum class Packing : uint16_t { + None = 0, + CSI2 = 1, + IPU3 = 2, + PISP1 = 3, + PISP2 = 4, + }; + + constexpr BayerFormat() + : order(Order::BGGR), bitDepth(0), packing(Packing::None) + { + } + + constexpr BayerFormat(Order o, uint8_t b, Packing p) + : order(o), bitDepth(b), packing(p) + { + } + + static const BayerFormat &fromMbusCode(unsigned int mbusCode); + bool isValid() const { return bitDepth != 0; } + + std::string toString() const; + + V4L2PixelFormat toV4L2PixelFormat() const; + static BayerFormat fromV4L2PixelFormat(V4L2PixelFormat v4l2Format); + PixelFormat toPixelFormat() const; + static BayerFormat fromPixelFormat(PixelFormat format); + BayerFormat transform(Transform t) const; + + Order order; + uint8_t bitDepth; + + Packing packing; +}; + +bool operator==(const BayerFormat &lhs, const BayerFormat &rhs); +static inline bool operator!=(const BayerFormat &lhs, const BayerFormat &rhs) +{ + return !(lhs == rhs); +} + +std::ostream &operator<<(std::ostream &out, const BayerFormat &f); + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/byte_stream_buffer.h b/spider-cam/libcamera/include/libcamera/internal/byte_stream_buffer.h new file mode 100644 index 0000000..5b1c10a --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/byte_stream_buffer.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * Byte stream buffer + */ + +#pragma once + +#include +#include +#include + +#include +#include + +namespace libcamera { + +class ByteStreamBuffer +{ +public: + ByteStreamBuffer(const uint8_t *base, size_t size); + ByteStreamBuffer(uint8_t *base, size_t size); + ByteStreamBuffer(ByteStreamBuffer &&other); + ByteStreamBuffer &operator=(ByteStreamBuffer &&other); + + const uint8_t *base() const { return base_; } + uint32_t offset() const { return (write_ ? 
write_ : read_) - base_; }
+	size_t size() const { return size_; }
+	bool overflow() const { return overflow_; }
+
+	ByteStreamBuffer carveOut(size_t size);
+	int skip(size_t size);
+
+	template<typename T>
+	int read(T *t)
+	{
+		return read(reinterpret_cast<uint8_t *>(t), sizeof(*t));
+	}
+
+	template<typename T>
+	int read(const Span<T> &data)
+	{
+		return read(reinterpret_cast<uint8_t *>(data.data()),
+			    data.size_bytes());
+	}
+
+	template<typename T>
+	const std::remove_reference_t<T> *read(size_t count = 1)
+	{
+		using return_type = const std::remove_reference_t<T> *;
+		return reinterpret_cast<return_type>(read(sizeof(T), count));
+	}
+
+	template<typename T>
+	int write(const T *t)
+	{
+		return write(reinterpret_cast<const uint8_t *>(t), sizeof(*t));
+	}
+
+	template<typename T>
+	int write(const Span<T> &data)
+	{
+		return write(reinterpret_cast<const uint8_t *>(data.data()),
+			     data.size_bytes());
+	}
+
+private:
+	LIBCAMERA_DISABLE_COPY(ByteStreamBuffer)
+
+	void setOverflow();
+
+	int read(uint8_t *data, size_t size);
+	const uint8_t *read(size_t size, size_t count);
+	int write(const uint8_t *data, size_t size);
+
+	ByteStreamBuffer *parent_;
+
+	const uint8_t *base_;
+	size_t size_;
+	bool overflow_;
+
+	const uint8_t *read_;
+	uint8_t *write_;
+};
+
+} /* namespace libcamera */
diff --git a/spider-cam/libcamera/include/libcamera/internal/camera.h b/spider-cam/libcamera/include/libcamera/internal/camera.h
new file mode 100644
index 0000000..0add042
--- /dev/null
+++ b/spider-cam/libcamera/include/libcamera/internal/camera.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Camera private data
+ */
+
+#pragma once
+
+#include <atomic>
+#include <list>
+#include <memory>
+#include <set>
+#include <string>
+
+#include <libcamera/base/class.h>
+
+#include <libcamera/camera.h>
+
+namespace libcamera {
+
+class CameraControlValidator;
+class PipelineHandler;
+class Stream;
+
+class Camera::Private : public Extensible::Private
+{
+	LIBCAMERA_DECLARE_PUBLIC(Camera)
+
+public:
+	Private(PipelineHandler *pipe);
+	~Private();
+
+	PipelineHandler *pipe() { return pipe_.get(); }
+
+	std::list<Request *> queuedRequests_;
+	ControlInfoMap controlInfo_;
+	ControlList properties_;
+
+	uint32_t requestSequence_;
+
+	const CameraControlValidator *validator() const { return validator_.get(); }
+
+private:
+	enum State {
+		CameraAvailable,
+		CameraAcquired,
+		CameraConfigured,
+		CameraStopping,
+		CameraRunning,
+	};
+
+	bool isAcquired() const;
+	bool isRunning() const;
+	int isAccessAllowed(State state, bool allowDisconnected = false,
+			    const char *from = __builtin_FUNCTION()) const;
+	int isAccessAllowed(State low, State high,
+			    bool allowDisconnected = false,
+			    const char *from = __builtin_FUNCTION()) const;
+
+	void disconnect();
+	void setState(State state);
+
+	std::shared_ptr<PipelineHandler> pipe_;
+	std::string id_;
+	std::set<Stream *> streams_;
+	std::set<const Stream *> activeStreams_;
+
+	bool disconnected_;
+	std::atomic<State> state_;
+
+	std::unique_ptr<CameraControlValidator> validator_;
+};
+
+} /* namespace libcamera */
diff --git a/spider-cam/libcamera/include/libcamera/internal/camera_controls.h b/spider-cam/libcamera/include/libcamera/internal/camera_controls.h
new file mode 100644
index 0000000..4a5a3eb
--- /dev/null
+++ b/spider-cam/libcamera/include/libcamera/internal/camera_controls.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
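+ *
+ * Editorial note (sketch): the validator declared below answers a single
+ * question: does the bound camera expose the control with a given numeric
+ * id? Assuming a Camera *camera:
+ *
+ *   CameraControlValidator validator(camera);
+ *   bool supported = validator.validate(controls::Brightness.id());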
+ * + * Camera controls + */ + +#pragma once + +#include "libcamera/internal/control_validator.h" + +namespace libcamera { + +class Camera; + +class CameraControlValidator final : public ControlValidator +{ +public: + CameraControlValidator(Camera *camera); + + const std::string &name() const override; + bool validate(unsigned int id) const override; + +private: + Camera *camera_; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/camera_lens.h b/spider-cam/libcamera/include/libcamera/internal/camera_lens.h new file mode 100644 index 0000000..5a4b993 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/camera_lens.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2021, Google Inc. + * + * A camera lens controller + */ +#pragma once + +#include +#include + +#include +#include + +#include + +namespace libcamera { + +class MediaEntity; +class V4L2Subdevice; + +class CameraLens : protected Loggable +{ +public: + explicit CameraLens(const MediaEntity *entity); + ~CameraLens(); + + int init(); + int setFocusPosition(int32_t position); + + const std::string &model() const { return model_; } + + const ControlInfoMap &controls() const; + +protected: + std::string logPrefix() const override; + +private: + LIBCAMERA_DISABLE_COPY_AND_MOVE(CameraLens) + + int validateLensDriver(); + + const MediaEntity *entity_; + std::unique_ptr subdev_; + + std::string model_; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/camera_manager.h b/spider-cam/libcamera/include/libcamera/internal/camera_manager.h new file mode 100644 index 0000000..af9ed60 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/camera_manager.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2023, Ideas on Board Oy. 
+ * + * Camera manager private data + */ + +#pragma once + +#include + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "libcamera/internal/ipa_manager.h" +#include "libcamera/internal/process.h" + +namespace libcamera { + +class Camera; +class DeviceEnumerator; + +class CameraManager::Private : public Extensible::Private, public Thread +{ + LIBCAMERA_DECLARE_PUBLIC(CameraManager) + +public: + Private(); + + int start(); + void addCamera(std::shared_ptr camera) LIBCAMERA_TSA_EXCLUDES(mutex_); + void removeCamera(std::shared_ptr camera) LIBCAMERA_TSA_EXCLUDES(mutex_); + +protected: + void run() override; + +private: + int init(); + void createPipelineHandlers(); + void pipelineFactoryMatch(const PipelineHandlerFactoryBase *factory); + void cleanup() LIBCAMERA_TSA_EXCLUDES(mutex_); + + /* + * This mutex protects + * + * - initialized_ and status_ during initialization + * - cameras_ after initialization + */ + mutable Mutex mutex_; + std::vector> cameras_ LIBCAMERA_TSA_GUARDED_BY(mutex_); + + ConditionVariable cv_; + bool initialized_ LIBCAMERA_TSA_GUARDED_BY(mutex_); + int status_ LIBCAMERA_TSA_GUARDED_BY(mutex_); + + std::unique_ptr enumerator_; + + IPAManager ipaManager_; + ProcessManager processManager_; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/camera_sensor.h b/spider-cam/libcamera/include/libcamera/internal/camera_sensor.h new file mode 100644 index 0000000..fc44ab9 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/camera_sensor.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * A camera sensor + */ + +#pragma once + +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include "libcamera/internal/bayer_format.h" +#include "libcamera/internal/formats.h" +#include "libcamera/internal/v4l2_subdevice.h" + +namespace libcamera { + +class CameraLens; +class MediaEntity; +class SensorConfiguration; + +struct CameraSensorProperties; + +enum class Orientation; + +class CameraSensor : protected Loggable +{ +public: + explicit CameraSensor(const MediaEntity *entity); + ~CameraSensor(); + + int init(); + + const std::string &model() const { return model_; } + const std::string &id() const { return id_; } + + const MediaEntity *entity() const { return entity_; } + V4L2Subdevice *device() { return subdev_.get(); } + + CameraLens *focusLens() { return focusLens_.get(); } + + const std::vector &mbusCodes() const { return mbusCodes_; } + std::vector sizes(unsigned int mbusCode) const; + Size resolution() const; + + V4L2SubdeviceFormat getFormat(const std::vector &mbusCodes, + const Size &size) const; + int setFormat(V4L2SubdeviceFormat *format, + Transform transform = Transform::Identity); + int tryFormat(V4L2SubdeviceFormat *format) const; + + int applyConfiguration(const SensorConfiguration &config, + Transform transform = Transform::Identity, + V4L2SubdeviceFormat *sensorFormat = nullptr); + + const ControlList &properties() const { return properties_; } + int sensorInfo(IPACameraSensorInfo *info) const; + Transform computeTransform(Orientation *orientation) const; + BayerFormat::Order bayerOrder(Transform t) const; + + const ControlInfoMap &controls() const; + ControlList getControls(const std::vector &ids); + int setControls(ControlList *ctrls); + + const std::vector &testPatternModes() const + { + return testPatternModes_; + } + int 
setTestPatternMode(controls::draft::TestPatternModeEnum mode); + +protected: + std::string logPrefix() const override; + +private: + LIBCAMERA_DISABLE_COPY(CameraSensor) + + int generateId(); + int validateSensorDriver(); + void initVimcDefaultProperties(); + void initStaticProperties(); + void initTestPatternModes(); + int initProperties(); + int discoverAncillaryDevices(); + int applyTestPatternMode(controls::draft::TestPatternModeEnum mode); + + const MediaEntity *entity_; + std::unique_ptr subdev_; + unsigned int pad_; + + const CameraSensorProperties *staticProps_; + + std::string model_; + std::string id_; + + V4L2Subdevice::Formats formats_; + std::vector mbusCodes_; + std::vector sizes_; + std::vector testPatternModes_; + controls::draft::TestPatternModeEnum testPatternMode_; + + Size pixelArraySize_; + Rectangle activeArea_; + const BayerFormat *bayerFormat_; + bool supportFlips_; + bool flipsAlterBayerOrder_; + Orientation mountingOrientation_; + + ControlList properties_; + + std::unique_ptr focusLens_; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/camera_sensor_properties.h b/spider-cam/libcamera/include/libcamera/internal/camera_sensor_properties.h new file mode 100644 index 0000000..480ac12 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/camera_sensor_properties.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2021, Google Inc. + * + * Database of camera sensor properties + */ + +#pragma once + +#include +#include + +#include +#include + +namespace libcamera { + +struct CameraSensorProperties { + static const CameraSensorProperties *get(const std::string &sensor); + + Size unitCellSize; + std::map testPatternModes; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/control_serializer.h b/spider-cam/libcamera/include/libcamera/internal/control_serializer.h new file mode 100644 index 0000000..8a63ae4 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/control_serializer.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. 
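+ *
+ * Editorial note (sketch): round-tripping a ControlList through a
+ * ByteStreamBuffer, assuming the associated ControlInfoMap has already
+ * been serialized to the peer:
+ *
+ *   ControlSerializer proxy(ControlSerializer::Role::Proxy);
+ *   std::vector<uint8_t> storage(ControlSerializer::binarySize(list));
+ *   ByteStreamBuffer writer(storage.data(), storage.size());
+ *   proxy.serialize(list, writer);
+ *
+ *   ControlSerializer worker(ControlSerializer::Role::Worker);
+ *   const uint8_t *data = storage.data();
+ *   ByteStreamBuffer reader(data, storage.size());
+ *   ControlList copy = worker.deserialize<ControlList>(reader);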
+ * + * Control (de)serializer + */ + +#pragma once + +#include +#include +#include + +#include + +namespace libcamera { + +class ByteStreamBuffer; + +class ControlSerializer +{ +public: + enum class Role { + Proxy, + Worker + }; + + ControlSerializer(Role role); + + void reset(); + + static size_t binarySize(const ControlInfoMap &infoMap); + static size_t binarySize(const ControlList &list); + + int serialize(const ControlInfoMap &infoMap, ByteStreamBuffer &buffer); + int serialize(const ControlList &list, ByteStreamBuffer &buffer); + + template + T deserialize(ByteStreamBuffer &buffer); + + bool isCached(const ControlInfoMap &infoMap); + +private: + static size_t binarySize(const ControlValue &value); + static size_t binarySize(const ControlInfo &info); + + static void store(const ControlValue &value, ByteStreamBuffer &buffer); + static void store(const ControlInfo &info, ByteStreamBuffer &buffer); + + ControlValue loadControlValue(ByteStreamBuffer &buffer, + bool isArray = false, unsigned int count = 1); + ControlInfo loadControlInfo(ByteStreamBuffer &buffer); + + unsigned int serial_; + unsigned int serialSeed_; + std::vector> controlIds_; + std::vector> controlIdMaps_; + std::map infoMaps_; + std::map infoMapHandles_; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/control_validator.h b/spider-cam/libcamera/include/libcamera/internal/control_validator.h new file mode 100644 index 0000000..260602f --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/control_validator.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * Control validator + */ + +#pragma once + +#include + +namespace libcamera { + +class ControlId; + +class ControlValidator +{ +public: + virtual ~ControlValidator() = default; + + virtual const std::string &name() const = 0; + virtual bool validate(unsigned int id) const = 0; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/converter.h b/spider-cam/libcamera/include/libcamera/internal/converter.h new file mode 100644 index 0000000..b51563d --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/converter.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Laurent Pinchart + * Copyright 2022 NXP + * + * Generic format converter interface + */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +namespace libcamera { + +class FrameBuffer; +class MediaDevice; +class PixelFormat; +class Stream; +struct StreamConfiguration; + +class Converter +{ +public: + Converter(MediaDevice *media); + virtual ~Converter(); + + virtual int loadConfiguration(const std::string &filename) = 0; + + virtual bool isValid() const = 0; + + virtual std::vector formats(PixelFormat input) = 0; + virtual SizeRange sizes(const Size &input) = 0; + + virtual std::tuple + strideAndFrameSize(const PixelFormat &pixelFormat, const Size &size) = 0; + + virtual int configure(const StreamConfiguration &inputCfg, + const std::vector> &outputCfgs) = 0; + virtual int exportBuffers(const Stream *stream, unsigned int count, + std::vector> *buffers) = 0; + + virtual int start() = 0; + virtual void stop() = 0; + + virtual int queueBuffers(FrameBuffer *input, + const std::map &outputs) = 0; + + Signal inputBufferReady; + Signal outputBufferReady; + + const std::string &deviceNode() const { return deviceNode_; } + 
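+	/*
+	 * Editorial note: a pipeline handler is expected to drive a
+	 * converter roughly as configure() -> exportBuffers() -> start(),
+	 * then queueBuffers() once per frame, with completions delivered
+	 * through the inputBufferReady and outputBufferReady signals, and
+	 * finally stop() at the end of the session.
+	 */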
+private: + std::string deviceNode_; +}; + +class ConverterFactoryBase +{ +public: + ConverterFactoryBase(const std::string name, std::initializer_list compatibles); + virtual ~ConverterFactoryBase() = default; + + const std::vector &compatibles() const { return compatibles_; } + + static std::unique_ptr create(MediaDevice *media); + static std::vector &factories(); + static std::vector names(); + +private: + LIBCAMERA_DISABLE_COPY_AND_MOVE(ConverterFactoryBase) + + static void registerType(ConverterFactoryBase *factory); + + virtual std::unique_ptr createInstance(MediaDevice *media) const = 0; + + std::string name_; + std::vector compatibles_; +}; + +template +class ConverterFactory : public ConverterFactoryBase +{ +public: + ConverterFactory(const char *name, std::initializer_list compatibles) + : ConverterFactoryBase(name, compatibles) + { + } + + std::unique_ptr createInstance(MediaDevice *media) const override + { + return std::make_unique<_Converter>(media); + } +}; + +#define REGISTER_CONVERTER(name, converter, compatibles) \ + static ConverterFactory global_##converter##Factory(name, compatibles); + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/converter/converter_v4l2_m2m.h b/spider-cam/libcamera/include/libcamera/internal/converter/converter_v4l2_m2m.h new file mode 100644 index 0000000..b9e5989 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/converter/converter_v4l2_m2m.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Laurent Pinchart + * Copyright 2022 NXP + * + * V4l2 M2M Format converter interface + */ + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include "libcamera/internal/converter.h" + +namespace libcamera { + +class FrameBuffer; +class MediaDevice; +class Size; +class SizeRange; +class Stream; +struct StreamConfiguration; +class V4L2M2MDevice; + +class V4L2M2MConverter : public Converter +{ +public: + V4L2M2MConverter(MediaDevice *media); + + int loadConfiguration([[maybe_unused]] const std::string &filename) { return 0; } + bool isValid() const { return m2m_ != nullptr; } + + std::vector formats(PixelFormat input); + SizeRange sizes(const Size &input); + + std::tuple + strideAndFrameSize(const PixelFormat &pixelFormat, const Size &size); + + int configure(const StreamConfiguration &inputCfg, + const std::vector> &outputCfg); + int exportBuffers(const Stream *stream, unsigned int count, + std::vector> *buffers); + + int start(); + void stop(); + + int queueBuffers(FrameBuffer *input, + const std::map &outputs); + +private: + class V4L2M2MStream : protected Loggable + { + public: + V4L2M2MStream(V4L2M2MConverter *converter, const Stream *stream); + + bool isValid() const { return m2m_ != nullptr; } + + int configure(const StreamConfiguration &inputCfg, + const StreamConfiguration &outputCfg); + int exportBuffers(unsigned int count, + std::vector> *buffers); + + int start(); + void stop(); + + int queueBuffers(FrameBuffer *input, FrameBuffer *output); + + protected: + std::string logPrefix() const override; + + private: + void captureBufferReady(FrameBuffer *buffer); + void outputBufferReady(FrameBuffer *buffer); + + V4L2M2MConverter *converter_; + const Stream *stream_; + std::unique_ptr m2m_; + + unsigned int inputBufferCount_; + unsigned int outputBufferCount_; + }; + + std::unique_ptr m2m_; + + std::map> streams_; + std::map queue_; +}; + +} /* namespace libcamera */ diff --git 
a/spider-cam/libcamera/include/libcamera/internal/converter/meson.build b/spider-cam/libcamera/include/libcamera/internal/converter/meson.build new file mode 100644 index 0000000..891e79e --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/converter/meson.build @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: CC0-1.0 + +libcamera_internal_headers += files([ + 'converter_v4l2_m2m.h', +]) diff --git a/spider-cam/libcamera/include/libcamera/internal/delayed_controls.h b/spider-cam/libcamera/include/libcamera/internal/delayed_controls.h new file mode 100644 index 0000000..e8d3014 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/delayed_controls.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Raspberry Pi Ltd + * + * Helper to deal with controls that take effect with a delay + */ + +#pragma once + +#include +#include + +#include + +namespace libcamera { + +class V4L2Device; + +class DelayedControls +{ +public: + struct ControlParams { + unsigned int delay; + bool priorityWrite; + }; + + DelayedControls(V4L2Device *device, + const std::unordered_map &controlParams); + + void reset(); + + bool push(const ControlList &controls); + ControlList get(uint32_t sequence); + + void applyControls(uint32_t sequence); + +private: + class Info : public ControlValue + { + public: + Info() + : updated(false) + { + } + + Info(const ControlValue &v, bool updated_ = true) + : ControlValue(v), updated(updated_) + { + } + + bool updated; + }; + + /* \todo Make the listSize configurable at instance creation time. */ + static constexpr int listSize = 16; + class ControlRingBuffer : public std::array + { + public: + Info &operator[](unsigned int index) + { + return std::array::operator[](index % listSize); + } + + const Info &operator[](unsigned int index) const + { + return std::array::operator[](index % listSize); + } + }; + + V4L2Device *device_; + /* \todo Evaluate if we should index on ControlId * or unsigned int */ + std::unordered_map controlParams_; + unsigned int maxDelay_; + + uint32_t queueCount_; + uint32_t writeCount_; + /* \todo Evaluate if we should index on ControlId * or unsigned int */ + std::unordered_map values_; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/device_enumerator.h b/spider-cam/libcamera/include/libcamera/internal/device_enumerator.h new file mode 100644 index 0000000..db3532a --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/device_enumerator.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2018, Google Inc. 
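+ *
+ * Editorial note (sketch): pipeline handlers locate their hardware with a
+ * DeviceMatch, e.g. assuming a UVC camera driven by the uvcvideo kernel
+ * driver and a DeviceEnumerator *enumerator:
+ *
+ *   DeviceMatch dm("uvcvideo");
+ *   std::shared_ptr<MediaDevice> media = enumerator->search(dm);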
+ * + * API to enumerate and find media devices + */ + +#pragma once + +#include +#include +#include + +#include + +namespace libcamera { + +class MediaDevice; + +class DeviceMatch +{ +public: + DeviceMatch(const std::string &driver); + + void add(const std::string &entity); + + bool match(const MediaDevice *device) const; + +private: + std::string driver_; + std::vector entities_; +}; + +class DeviceEnumerator +{ +public: + static std::unique_ptr create(); + + virtual ~DeviceEnumerator(); + + virtual int init() = 0; + virtual int enumerate() = 0; + + std::shared_ptr search(const DeviceMatch &dm); + + Signal<> devicesAdded; + +protected: + std::unique_ptr createDevice(const std::string &deviceNode); + void addDevice(std::unique_ptr media); + void removeDevice(const std::string &deviceNode); + +private: + std::vector> devices_; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/device_enumerator_sysfs.h b/spider-cam/libcamera/include/libcamera/internal/device_enumerator_sysfs.h new file mode 100644 index 0000000..a5bfc71 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/device_enumerator_sysfs.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * sysfs-based device enumerator + */ + +#pragma once + +#include +#include + +#include "libcamera/internal/device_enumerator.h" + +class MediaDevice; + +namespace libcamera { + +class DeviceEnumeratorSysfs final : public DeviceEnumerator +{ +public: + int init(); + int enumerate(); + +private: + int populateMediaDevice(MediaDevice *media); + std::string lookupDeviceNode(int major, int minor); +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/device_enumerator_udev.h b/spider-cam/libcamera/include/libcamera/internal/device_enumerator_udev.h new file mode 100644 index 0000000..1378c19 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/device_enumerator_udev.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2018-2019, Google Inc. 
+ * + * udev-based device enumerator + */ + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "libcamera/internal/device_enumerator.h" + +struct udev; +struct udev_device; +struct udev_monitor; + +namespace libcamera { + +class EventNotifier; +class MediaDevice; +class MediaEntity; + +class DeviceEnumeratorUdev final : public DeviceEnumerator +{ +public: + DeviceEnumeratorUdev(); + ~DeviceEnumeratorUdev(); + + int init(); + int enumerate(); + +private: + using DependencyMap = std::map>; + + struct MediaDeviceDeps { + MediaDeviceDeps(std::unique_ptr media, + DependencyMap deps) + : media_(std::move(media)), deps_(std::move(deps)) + { + } + + bool operator==(const MediaDeviceDeps &other) const + { + return media_ == other.media_; + } + + std::unique_ptr media_; + DependencyMap deps_; + }; + + int addUdevDevice(struct udev_device *dev); + int populateMediaDevice(MediaDevice *media, DependencyMap *deps); + std::string lookupDeviceNode(dev_t devnum); + + int addV4L2Device(dev_t devnum); + void udevNotify(); + + struct udev *udev_; + struct udev_monitor *monitor_; + EventNotifier *notifier_; + + std::set orphans_; + std::list pending_; + std::map devMap_; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/dma_buf_allocator.h b/spider-cam/libcamera/include/libcamera/internal/dma_buf_allocator.h new file mode 100644 index 0000000..36ec169 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/dma_buf_allocator.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Raspberry Pi Ltd + * + * Helper class for dma-buf allocations. + */ + +#pragma once + +#include + +#include +#include + +namespace libcamera { + +class DmaBufAllocator +{ +public: + enum class DmaBufAllocatorFlag { + CmaHeap = 1 << 0, + SystemHeap = 1 << 1, + UDmaBuf = 1 << 2, + }; + + using DmaBufAllocatorFlags = Flags; + + DmaBufAllocator(DmaBufAllocatorFlags flags = DmaBufAllocatorFlag::CmaHeap); + ~DmaBufAllocator(); + bool isValid() const { return providerHandle_.isValid(); } + UniqueFD alloc(const char *name, std::size_t size); + +private: + UniqueFD allocFromHeap(const char *name, std::size_t size); + UniqueFD allocFromUDmaBuf(const char *name, std::size_t size); + UniqueFD providerHandle_; + DmaBufAllocatorFlag type_; +}; + +LIBCAMERA_FLAGS_ENABLE_OPERATORS(DmaBufAllocator::DmaBufAllocatorFlag) + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/formats.h b/spider-cam/libcamera/include/libcamera/internal/formats.h new file mode 100644 index 0000000..71895cd --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/formats.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. 
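+ *
+ * Editorial note (sketch): PixelFormatInfo answers layout questions about
+ * a pixel format, e.g. assuming NV12 at 1920x1080:
+ *
+ *   const PixelFormatInfo &info = PixelFormatInfo::info(formats::NV12);
+ *   unsigned int stride = info.stride(1920, 0);
+ *   unsigned int frameSize = info.frameSize(Size(1920, 1080));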
+ * + * libcamera image formats + */ + +#pragma once + +#include +#include +#include + +#include +#include + +#include "libcamera/internal/v4l2_pixelformat.h" + +namespace libcamera { + +class PixelFormatInfo +{ +public: + enum ColourEncoding { + ColourEncodingRGB, + ColourEncodingYUV, + ColourEncodingRAW, + }; + + struct Plane { + unsigned int bytesPerGroup; + unsigned int verticalSubSampling; + }; + + bool isValid() const { return format.isValid(); } + + static const PixelFormatInfo &info(const PixelFormat &format); + static const PixelFormatInfo &info(const V4L2PixelFormat &format); + static const PixelFormatInfo &info(const std::string &name); + + unsigned int stride(unsigned int width, unsigned int plane, + unsigned int align = 1) const; + unsigned int planeSize(const Size &size, unsigned int plane, + unsigned int align = 1) const; + unsigned int planeSize(unsigned int height, unsigned int plane, + unsigned int stride) const; + unsigned int frameSize(const Size &size, unsigned int align = 1) const; + unsigned int frameSize(const Size &size, + const std::array &strides) const; + + unsigned int numPlanes() const; + + /* \todo Add support for non-contiguous memory planes */ + const char *name; + PixelFormat format; + std::vector v4l2Formats; + unsigned int bitsPerPixel; + enum ColourEncoding colourEncoding; + bool packed; + + unsigned int pixelsPerGroup; + + std::array planes; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/framebuffer.h b/spider-cam/libcamera/include/libcamera/internal/framebuffer.h new file mode 100644 index 0000000..e6698a4 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/framebuffer.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Google Inc. + * + * Internal frame buffer handling + */ + +#pragma once + +#include +#include + +#include + +#include +#include + +namespace libcamera { + +class FrameBuffer::Private : public Extensible::Private +{ + LIBCAMERA_DECLARE_PUBLIC(FrameBuffer) + +public: + Private(const std::vector &planes, uint64_t cookie = 0); + virtual ~Private(); + + void setRequest(Request *request) { request_ = request; } + bool isContiguous() const { return isContiguous_; } + + Fence *fence() const { return fence_.get(); } + void setFence(std::unique_ptr fence) { fence_ = std::move(fence); } + + void cancel() { metadata_.status = FrameMetadata::FrameCancelled; } + + FrameMetadata &metadata() { return metadata_; } + +private: + std::vector planes_; + FrameMetadata metadata_; + uint64_t cookie_; + + std::unique_ptr fence_; + Request *request_; + bool isContiguous_; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/ipa_data_serializer.h b/spider-cam/libcamera/include/libcamera/internal/ipa_data_serializer.h new file mode 100644 index 0000000..337c948 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/ipa_data_serializer.h @@ -0,0 +1,352 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Google Inc. 
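+ *
+ * Editorial note: the container specializations below use a simple
+ * length-prefixed wire format in host byte order. Serializing
+ * std::vector<uint32_t>{ 7 }, for instance, yields four 32-bit fields:
+ * the element count (1), the element's byte size (4), its fd count (0),
+ * and finally the raw bytes of the value.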
+ * + * Image Processing Algorithm data serializer + */ + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +#include "libcamera/internal/byte_stream_buffer.h" +#include "libcamera/internal/camera_sensor.h" +#include "libcamera/internal/control_serializer.h" + +namespace libcamera { + +LOG_DECLARE_CATEGORY(IPADataSerializer) + +namespace { + +template> * = nullptr> +void appendPOD(std::vector &vec, T val) +{ + constexpr size_t byteWidth = sizeof(val); + vec.resize(vec.size() + byteWidth); + memcpy(&*(vec.end() - byteWidth), &val, byteWidth); +} + +template> * = nullptr> +T readPOD(std::vector::const_iterator it, size_t pos, + std::vector::const_iterator end) +{ + ASSERT(pos + it < end); + + T ret = 0; + memcpy(&ret, &(*(it + pos)), sizeof(ret)); + + return ret; +} + +template> * = nullptr> +T readPOD(std::vector &vec, size_t pos) +{ + return readPOD(vec.cbegin(), pos, vec.end()); +} + +} /* namespace */ + +template +class IPADataSerializer +{ +public: + static std::tuple, std::vector> + serialize(const T &data, ControlSerializer *cs = nullptr); + + static T deserialize(const std::vector &data, + ControlSerializer *cs = nullptr); + static T deserialize(std::vector::const_iterator dataBegin, + std::vector::const_iterator dataEnd, + ControlSerializer *cs = nullptr); + + static T deserialize(const std::vector &data, + const std::vector &fds, + ControlSerializer *cs = nullptr); + static T deserialize(std::vector::const_iterator dataBegin, + std::vector::const_iterator dataEnd, + std::vector::const_iterator fdsBegin, + std::vector::const_iterator fdsEnd, + ControlSerializer *cs = nullptr); +}; + +#ifndef __DOXYGEN__ + +/* + * Serialization format for vector of type V: + * + * 4 bytes - uint32_t Length of vector, in number of elements + * + * For every element in the vector: + * + * 4 bytes - uint32_t Size of element, in bytes + * 4 bytes - uint32_t Number of fds for the element + * X bytes - Serialized element + * + * \todo Support elements that are references + */ +template +class IPADataSerializer> +{ +public: + static std::tuple, std::vector> + serialize(const std::vector &data, ControlSerializer *cs = nullptr) + { + std::vector dataVec; + std::vector fdsVec; + + /* Serialize the length. */ + uint32_t vecLen = data.size(); + appendPOD(dataVec, vecLen); + + /* Serialize the members. 
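+		 * Editorial note: each element is prefixed with its own
+		 * byte size and fd count so that deserialization can walk
+		 * the stream without knowing the element layout.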
+template<typename V>
+class IPADataSerializer<std::vector<V>>
+{
+public:
+	static std::tuple<std::vector<uint8_t>, std::vector<SharedFD>>
+	serialize(const std::vector<V> &data, ControlSerializer *cs = nullptr)
+	{
+		std::vector<uint8_t> dataVec;
+		std::vector<SharedFD> fdsVec;
+
+		/* Serialize the length. */
+		uint32_t vecLen = data.size();
+		appendPOD<uint32_t>(dataVec, vecLen);
+
+		/* Serialize the members. */
+		for (auto const &it : data) {
+			std::vector<uint8_t> dvec;
+			std::vector<SharedFD> fvec;
+
+			std::tie(dvec, fvec) =
+				IPADataSerializer<V>::serialize(it, cs);
+
+			appendPOD<uint32_t>(dataVec, dvec.size());
+			appendPOD<uint32_t>(dataVec, fvec.size());
+
+			dataVec.insert(dataVec.end(), dvec.begin(), dvec.end());
+			fdsVec.insert(fdsVec.end(), fvec.begin(), fvec.end());
+		}
+
+		return { dataVec, fdsVec };
+	}
+
+	static std::vector<V> deserialize(std::vector<uint8_t> &data, ControlSerializer *cs = nullptr)
+	{
+		return deserialize(data.cbegin(), data.cend(), cs);
+	}
+
+	static std::vector<V> deserialize(std::vector<uint8_t>::const_iterator dataBegin,
+					  std::vector<uint8_t>::const_iterator dataEnd,
+					  ControlSerializer *cs = nullptr)
+	{
+		std::vector<SharedFD> fds;
+		return deserialize(dataBegin, dataEnd, fds.cbegin(), fds.cend(), cs);
+	}
+
+	static std::vector<V> deserialize(std::vector<uint8_t> &data, std::vector<SharedFD> &fds,
+					  ControlSerializer *cs = nullptr)
+	{
+		return deserialize(data.cbegin(), data.cend(), fds.cbegin(), fds.cend(), cs);
+	}
+
+	static std::vector<V> deserialize(std::vector<uint8_t>::const_iterator dataBegin,
+					  std::vector<uint8_t>::const_iterator dataEnd,
+					  std::vector<SharedFD>::const_iterator fdsBegin,
+					  [[maybe_unused]] std::vector<SharedFD>::const_iterator fdsEnd,
+					  ControlSerializer *cs = nullptr)
+	{
+		uint32_t vecLen = readPOD<uint32_t>(dataBegin, 0, dataEnd);
+		std::vector<V> ret(vecLen);
+
+		std::vector<uint8_t>::const_iterator dataIter = dataBegin + 4;
+		std::vector<SharedFD>::const_iterator fdIter = fdsBegin;
+		for (uint32_t i = 0; i < vecLen; i++) {
+			uint32_t sizeofData = readPOD<uint32_t>(dataIter, 0, dataEnd);
+			uint32_t sizeofFds = readPOD<uint32_t>(dataIter, 4, dataEnd);
+			dataIter += 8;
+
+			ret[i] = IPADataSerializer<V>::deserialize(dataIter,
+								   dataIter + sizeofData,
+								   fdIter,
+								   fdIter + sizeofFds,
+								   cs);
+
+			dataIter += sizeofData;
+			fdIter += sizeofFds;
+		}
+
+		return ret;
+	}
+};
+
+/*
+ * Serialization format for map of key type K and value type V:
+ *
+ * 4 bytes - uint32_t Length of map, in number of pairs
+ *
+ * For every pair in the map:
+ *
+ * 4 bytes - uint32_t Size of key, in bytes
+ * 4 bytes - uint32_t Number of fds for the key
+ * X bytes - Serialized key
+ * 4 bytes - uint32_t Size of value, in bytes
+ * 4 bytes - uint32_t Number of fds for the value
+ * X bytes - Serialized value
+ *
+ * \todo Support keys or values that are references
+ */
+template<typename K, typename V>
+class IPADataSerializer<std::map<K, V>>
+{
+public:
+	static std::tuple<std::vector<uint8_t>, std::vector<SharedFD>>
+	serialize(const std::map<K, V> &data, ControlSerializer *cs = nullptr)
+	{
+		std::vector<uint8_t> dataVec;
+		std::vector<SharedFD> fdsVec;
+
+		/* Serialize the length. */
+		uint32_t mapLen = data.size();
+		appendPOD<uint32_t>(dataVec, mapLen);
+
+		/* Serialize the members.
*/ + for (auto const &it : data) { + std::vector dvec; + std::vector fvec; + + std::tie(dvec, fvec) = + IPADataSerializer::serialize(it.first, cs); + + appendPOD(dataVec, dvec.size()); + appendPOD(dataVec, fvec.size()); + + dataVec.insert(dataVec.end(), dvec.begin(), dvec.end()); + fdsVec.insert(fdsVec.end(), fvec.begin(), fvec.end()); + + std::tie(dvec, fvec) = + IPADataSerializer::serialize(it.second, cs); + + appendPOD(dataVec, dvec.size()); + appendPOD(dataVec, fvec.size()); + + dataVec.insert(dataVec.end(), dvec.begin(), dvec.end()); + fdsVec.insert(fdsVec.end(), fvec.begin(), fvec.end()); + } + + return { dataVec, fdsVec }; + } + + static std::map deserialize(std::vector &data, ControlSerializer *cs = nullptr) + { + return deserialize(data.cbegin(), data.cend(), cs); + } + + static std::map deserialize(std::vector::const_iterator dataBegin, + std::vector::const_iterator dataEnd, + ControlSerializer *cs = nullptr) + { + std::vector fds; + return deserialize(dataBegin, dataEnd, fds.cbegin(), fds.cend(), cs); + } + + static std::map deserialize(std::vector &data, std::vector &fds, + ControlSerializer *cs = nullptr) + { + return deserialize(data.cbegin(), data.cend(), fds.cbegin(), fds.cend(), cs); + } + + static std::map deserialize(std::vector::const_iterator dataBegin, + std::vector::const_iterator dataEnd, + std::vector::const_iterator fdsBegin, + [[maybe_unused]] std::vector::const_iterator fdsEnd, + ControlSerializer *cs = nullptr) + { + std::map ret; + + uint32_t mapLen = readPOD(dataBegin, 0, dataEnd); + + std::vector::const_iterator dataIter = dataBegin + 4; + std::vector::const_iterator fdIter = fdsBegin; + for (uint32_t i = 0; i < mapLen; i++) { + uint32_t sizeofData = readPOD(dataIter, 0, dataEnd); + uint32_t sizeofFds = readPOD(dataIter, 4, dataEnd); + dataIter += 8; + + K key = IPADataSerializer::deserialize(dataIter, + dataIter + sizeofData, + fdIter, + fdIter + sizeofFds, + cs); + + dataIter += sizeofData; + fdIter += sizeofFds; + sizeofData = readPOD(dataIter, 0, dataEnd); + sizeofFds = readPOD(dataIter, 4, dataEnd); + dataIter += 8; + + const V value = IPADataSerializer::deserialize(dataIter, + dataIter + sizeofData, + fdIter, + fdIter + sizeofFds, + cs); + ret.insert({ key, value }); + + dataIter += sizeofData; + fdIter += sizeofFds; + } + + return ret; + } +}; + +/* Serialization format for Flags is same as for PODs */ +template +class IPADataSerializer> +{ +public: + static std::tuple, std::vector> + serialize(const Flags &data, [[maybe_unused]] ControlSerializer *cs = nullptr) + { + std::vector dataVec; + dataVec.reserve(sizeof(Flags)); + appendPOD(dataVec, static_cast::Type>(data)); + + return { dataVec, {} }; + } + + static Flags deserialize(std::vector &data, + [[maybe_unused]] ControlSerializer *cs = nullptr) + { + return deserialize(data.cbegin(), data.cend()); + } + + static Flags deserialize(std::vector::const_iterator dataBegin, + std::vector::const_iterator dataEnd, + [[maybe_unused]] ControlSerializer *cs = nullptr) + { + return Flags{ static_cast(readPOD(dataBegin, 0, dataEnd)) }; + } + + static Flags deserialize(std::vector &data, + [[maybe_unused]] std::vector &fds, + [[maybe_unused]] ControlSerializer *cs = nullptr) + { + return deserialize(data.cbegin(), data.cend()); + } + + static Flags deserialize(std::vector::const_iterator dataBegin, + std::vector::const_iterator dataEnd, + [[maybe_unused]] std::vector::const_iterator fdsBegin, + [[maybe_unused]] std::vector::const_iterator fdsEnd, + [[maybe_unused]] ControlSerializer *cs = nullptr) + { + return 
deserialize(dataBegin, dataEnd); + } +}; + +#endif /* __DOXYGEN__ */ + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/ipa_manager.h b/spider-cam/libcamera/include/libcamera/internal/ipa_manager.h new file mode 100644 index 0000000..c6f74e1 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/ipa_manager.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * Image Processing Algorithm module manager + */ + +#pragma once + +#include +#include + +#include + +#include +#include + +#include "libcamera/internal/ipa_module.h" +#include "libcamera/internal/pipeline_handler.h" +#include "libcamera/internal/pub_key.h" + +namespace libcamera { + +LOG_DECLARE_CATEGORY(IPAManager) + +class IPAManager +{ +public: + IPAManager(); + ~IPAManager(); + + template + static std::unique_ptr createIPA(PipelineHandler *pipe, + uint32_t minVersion, + uint32_t maxVersion) + { + IPAModule *m = self_->module(pipe, minVersion, maxVersion); + if (!m) + return nullptr; + + std::unique_ptr proxy = std::make_unique(m, !self_->isSignatureValid(m)); + if (!proxy->isValid()) { + LOG(IPAManager, Error) << "Failed to load proxy"; + return nullptr; + } + + return proxy; + } + +#if HAVE_IPA_PUBKEY + static const PubKey &pubKey() + { + return pubKey_; + } +#endif + +private: + static IPAManager *self_; + + void parseDir(const char *libDir, unsigned int maxDepth, + std::vector &files); + unsigned int addDir(const char *libDir, unsigned int maxDepth = 0); + + IPAModule *module(PipelineHandler *pipe, uint32_t minVersion, + uint32_t maxVersion); + + bool isSignatureValid(IPAModule *ipa) const; + + std::vector modules_; + +#if HAVE_IPA_PUBKEY + static const uint8_t publicKeyData_[]; + static const PubKey pubKey_; +#endif +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/ipa_module.h b/spider-cam/libcamera/include/libcamera/internal/ipa_module.h new file mode 100644 index 0000000..7c49d3f --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/ipa_module.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * Image Processing Algorithm module + */ + +#pragma once + +#include +#include +#include + +#include + +#include +#include + +#include "libcamera/internal/pipeline_handler.h" + +namespace libcamera { + +class IPAModule : public Loggable +{ +public: + explicit IPAModule(const std::string &libPath); + ~IPAModule(); + + bool isValid() const; + + const struct IPAModuleInfo &info() const; + const std::vector signature() const; + const std::string &path() const; + + bool load(); + + IPAInterface *createInterface(); + + bool match(PipelineHandler *pipe, + uint32_t minVersion, uint32_t maxVersion) const; + +protected: + std::string logPrefix() const override; + +private: + int loadIPAModuleInfo(); + + struct IPAModuleInfo info_; + std::vector signature_; + + std::string libPath_; + bool valid_; + bool loaded_; + + void *dlHandle_; + typedef IPAInterface *(*IPAIntfFactory)(void); + IPAIntfFactory ipaCreate_; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/ipa_proxy.h b/spider-cam/libcamera/include/libcamera/internal/ipa_proxy.h new file mode 100644 index 0000000..ed6a5bc --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/ipa_proxy.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. 
+ * + * Image Processing Algorithm proxy + */ + +#pragma once + +#include +#include +#include + +#include + +namespace libcamera { + +class IPAModule; + +class IPAProxy : public IPAInterface +{ +public: + enum ProxyState { + ProxyStopped, + ProxyStopping, + ProxyRunning, + }; + + IPAProxy(IPAModule *ipam); + ~IPAProxy(); + + bool isValid() const { return valid_; } + + std::string configurationFile(const std::string &file) const; + +protected: + std::string resolvePath(const std::string &file) const; + + bool valid_; + ProxyState state_; + +private: + IPAModule *ipam_; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/ipc_pipe.h b/spider-cam/libcamera/include/libcamera/internal/ipc_pipe.h new file mode 100644 index 0000000..a456075 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/ipc_pipe.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Google Inc. + * + * Image Processing Algorithm IPC module for IPA proxies + */ + +#pragma once + +#include + +#include +#include + +#include "libcamera/internal/ipc_unixsocket.h" + +namespace libcamera { + +class IPCMessage +{ +public: + struct Header { + uint32_t cmd; + uint32_t cookie; + }; + + IPCMessage(); + IPCMessage(uint32_t cmd); + IPCMessage(const Header &header); + IPCMessage(IPCUnixSocket::Payload &payload); + + IPCUnixSocket::Payload payload() const; + + Header &header() { return header_; } + std::vector &data() { return data_; } + std::vector &fds() { return fds_; } + + const Header &header() const { return header_; } + const std::vector &data() const { return data_; } + const std::vector &fds() const { return fds_; } + +private: + Header header_; + + std::vector data_; + std::vector fds_; +}; + +class IPCPipe +{ +public: + IPCPipe(); + virtual ~IPCPipe(); + + bool isConnected() const { return connected_; } + + virtual int sendSync(const IPCMessage &in, + IPCMessage *out) = 0; + + virtual int sendAsync(const IPCMessage &data) = 0; + + Signal recv; + +protected: + bool connected_; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/ipc_pipe_unixsocket.h b/spider-cam/libcamera/include/libcamera/internal/ipc_pipe_unixsocket.h new file mode 100644 index 0000000..4a0f6d5 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/ipc_pipe_unixsocket.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Google Inc. 
+ * + * Image Processing Algorithm IPC module using unix socket + */ + +#pragma once + +#include +#include +#include + +#include "libcamera/internal/ipc_pipe.h" +#include "libcamera/internal/ipc_unixsocket.h" + +namespace libcamera { + +class Process; + +class IPCPipeUnixSocket : public IPCPipe +{ +public: + IPCPipeUnixSocket(const char *ipaModulePath, const char *ipaProxyWorkerPath); + ~IPCPipeUnixSocket(); + + int sendSync(const IPCMessage &in, + IPCMessage *out = nullptr) override; + + int sendAsync(const IPCMessage &data) override; + +private: + struct CallData { + IPCUnixSocket::Payload *response; + bool done; + }; + + void readyRead(); + int call(const IPCUnixSocket::Payload &message, + IPCUnixSocket::Payload *response, uint32_t seq); + + std::unique_ptr proc_; + std::unique_ptr socket_; + std::map callData_; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/ipc_unixsocket.h b/spider-cam/libcamera/include/libcamera/internal/ipc_unixsocket.h new file mode 100644 index 0000000..48bb7a9 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/ipc_unixsocket.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * IPC mechanism based on Unix sockets + */ + +#pragma once + +#include +#include +#include + +#include +#include + +namespace libcamera { + +class EventNotifier; + +class IPCUnixSocket +{ +public: + struct Payload { + std::vector data; + std::vector fds; + }; + + IPCUnixSocket(); + ~IPCUnixSocket(); + + UniqueFD create(); + int bind(UniqueFD fd); + void close(); + bool isBound() const; + + int send(const Payload &payload); + int receive(Payload *payload); + + Signal<> readyRead; + +private: + struct Header { + uint32_t data; + uint8_t fds; + }; + + int sendData(const void *buffer, size_t length, const int32_t *fds, unsigned int num); + int recvData(void *buffer, size_t length, int32_t *fds, unsigned int num); + + void dataNotifier(); + + UniqueFD fd_; + bool headerReceived_; + struct Header header_; + EventNotifier *notifier_; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/mapped_framebuffer.h b/spider-cam/libcamera/include/libcamera/internal/mapped_framebuffer.h new file mode 100644 index 0000000..6aaabf5 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/mapped_framebuffer.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2021, Google Inc. 
+ * + * Frame buffer memory mapping support + */ + +#pragma once + +#include +#include + +#include +#include +#include + +#include + +namespace libcamera { + +class MappedBuffer +{ +public: + using Plane = Span; + + ~MappedBuffer(); + + MappedBuffer(MappedBuffer &&other); + MappedBuffer &operator=(MappedBuffer &&other); + + bool isValid() const { return error_ == 0; } + int error() const { return error_; } + const std::vector &planes() const { return planes_; } + +protected: + MappedBuffer(); + + int error_; + std::vector planes_; + std::vector maps_; + +private: + LIBCAMERA_DISABLE_COPY(MappedBuffer) +}; + +class MappedFrameBuffer : public MappedBuffer +{ +public: + enum class MapFlag { + Read = 1 << 0, + Write = 1 << 1, + ReadWrite = Read | Write, + }; + + using MapFlags = Flags; + + MappedFrameBuffer(const FrameBuffer *buffer, MapFlags flags); +}; + +LIBCAMERA_FLAGS_ENABLE_OPERATORS(MappedFrameBuffer::MapFlag) + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/media_device.h b/spider-cam/libcamera/include/libcamera/internal/media_device.h new file mode 100644 index 0000000..bf2e475 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/media_device.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2018, Google Inc. + * + * Media device handler + */ + +#pragma once + +#include +#include +#include +#include + +#include + +#include +#include +#include + +#include "libcamera/internal/media_object.h" + +namespace libcamera { + +class MediaDevice : protected Loggable +{ +public: + MediaDevice(const std::string &deviceNode); + ~MediaDevice(); + + bool acquire(); + void release(); + bool busy() const { return acquired_; } + + bool lock(); + void unlock(); + + int populate(); + bool isValid() const { return valid_; } + + const std::string &driver() const { return driver_; } + const std::string &deviceNode() const { return deviceNode_; } + const std::string &model() const { return model_; } + unsigned int version() const { return version_; } + unsigned int hwRevision() const { return hwRevision_; } + + const std::vector &entities() const { return entities_; } + MediaEntity *getEntityByName(const std::string &name) const; + + MediaLink *link(const std::string &sourceName, unsigned int sourceIdx, + const std::string &sinkName, unsigned int sinkIdx); + MediaLink *link(const MediaEntity *source, unsigned int sourceIdx, + const MediaEntity *sink, unsigned int sinkIdx); + MediaLink *link(const MediaPad *source, const MediaPad *sink); + int disableLinks(); + + Signal<> disconnected; + +protected: + std::string logPrefix() const override; + +private: + int open(); + void close(); + + MediaObject *object(unsigned int id); + bool addObject(MediaObject *object); + void clear(); + + struct media_v2_interface *findInterface(const struct media_v2_topology &topology, + unsigned int entityId); + bool populateEntities(const struct media_v2_topology &topology); + bool populatePads(const struct media_v2_topology &topology); + bool populateLinks(const struct media_v2_topology &topology); + void fixupEntityFlags(struct media_v2_entity *entity); + + friend int MediaLink::setEnabled(bool enable); + int setupLink(const MediaLink *link, unsigned int flags); + + std::string driver_; + std::string deviceNode_; + std::string model_; + unsigned int version_; + unsigned int hwRevision_; + + UniqueFD fd_; + bool valid_; + bool acquired_; + + std::map objects_; + std::vector entities_; +}; + +} /* namespace libcamera */ diff --git 
a/spider-cam/libcamera/include/libcamera/internal/media_object.h b/spider-cam/libcamera/include/libcamera/internal/media_object.h new file mode 100644 index 0000000..c9d7751 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/media_object.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2018, Google Inc. + * + * Media Device objects: entities, pads and links. + */ + +#pragma once + +#include +#include + +#include + +#include + +namespace libcamera { + +class MediaDevice; +class MediaEntity; +class MediaPad; + +class MediaObject +{ +public: + MediaDevice *device() { return dev_; } + const MediaDevice *device() const { return dev_; } + unsigned int id() const { return id_; } + +protected: + friend class MediaDevice; + + MediaObject(MediaDevice *dev, unsigned int id) + : dev_(dev), id_(id) + { + } + virtual ~MediaObject() = default; + + MediaDevice *dev_; + unsigned int id_; +}; + +class MediaLink : public MediaObject +{ +public: + MediaPad *source() const { return source_; } + MediaPad *sink() const { return sink_; } + unsigned int flags() const { return flags_; } + int setEnabled(bool enable); + +private: + LIBCAMERA_DISABLE_COPY_AND_MOVE(MediaLink) + + friend class MediaDevice; + + MediaLink(const struct media_v2_link *link, + MediaPad *source, MediaPad *sink); + + MediaPad *source_; + MediaPad *sink_; + unsigned int flags_; +}; + +class MediaPad : public MediaObject +{ +public: + unsigned int index() const { return index_; } + MediaEntity *entity() const { return entity_; } + unsigned int flags() const { return flags_; } + const std::vector &links() const { return links_; } + + void addLink(MediaLink *link); + +private: + LIBCAMERA_DISABLE_COPY_AND_MOVE(MediaPad) + + friend class MediaDevice; + + MediaPad(const struct media_v2_pad *pad, MediaEntity *entity); + + unsigned int index_; + MediaEntity *entity_; + unsigned int flags_; + + std::vector links_; +}; + +class MediaEntity : public MediaObject +{ +public: + enum class Type { + Invalid, + MediaEntity, + V4L2Subdevice, + V4L2VideoDevice, + }; + + const std::string &name() const { return name_; } + unsigned int function() const { return function_; } + unsigned int flags() const { return flags_; } + Type type() const { return type_; } + const std::string &deviceNode() const { return deviceNode_; } + unsigned int deviceMajor() const { return major_; } + unsigned int deviceMinor() const { return minor_; } + + const std::vector &pads() const { return pads_; } + const std::vector ancillaryEntities() const { return ancillaryEntities_; } + + const MediaPad *getPadByIndex(unsigned int index) const; + const MediaPad *getPadById(unsigned int id) const; + + int setDeviceNode(const std::string &deviceNode); + +private: + LIBCAMERA_DISABLE_COPY_AND_MOVE(MediaEntity) + + friend class MediaDevice; + + MediaEntity(MediaDevice *dev, const struct media_v2_entity *entity, + const struct media_v2_interface *iface); + + void addPad(MediaPad *pad); + + void addAncillaryEntity(MediaEntity *ancillaryEntity); + + std::string name_; + unsigned int function_; + unsigned int flags_; + Type type_; + std::string deviceNode_; + unsigned int major_; + unsigned int minor_; + + std::vector pads_; + std::vector ancillaryEntities_; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/meson.build b/spider-cam/libcamera/include/libcamera/internal/meson.build new file mode 100644 index 0000000..9713ea1 --- /dev/null +++ 
b/spider-cam/libcamera/include/libcamera/internal/meson.build @@ -0,0 +1,53 @@ +# SPDX-License-Identifier: CC0-1.0 + +subdir('tracepoints') + +libcamera_tracepoint_header = custom_target( + 'tp_header', + input : ['tracepoints.h.in', tracepoint_files], + output : 'tracepoints.h', + command : [gen_tracepoints_header, include_build_dir, '@OUTPUT@', '@INPUT@'], +) + +libcamera_internal_headers = files([ + 'bayer_format.h', + 'byte_stream_buffer.h', + 'camera.h', + 'camera_controls.h', + 'camera_lens.h', + 'camera_manager.h', + 'camera_sensor.h', + 'camera_sensor_properties.h', + 'control_serializer.h', + 'control_validator.h', + 'converter.h', + 'delayed_controls.h', + 'device_enumerator.h', + 'device_enumerator_sysfs.h', + 'device_enumerator_udev.h', + 'dma_buf_allocator.h', + 'formats.h', + 'framebuffer.h', + 'ipa_manager.h', + 'ipa_module.h', + 'ipa_proxy.h', + 'ipc_unixsocket.h', + 'mapped_framebuffer.h', + 'media_device.h', + 'media_object.h', + 'pipeline_handler.h', + 'process.h', + 'pub_key.h', + 'request.h', + 'shared_mem_object.h', + 'source_paths.h', + 'sysfs.h', + 'v4l2_device.h', + 'v4l2_pixelformat.h', + 'v4l2_subdevice.h', + 'v4l2_videodevice.h', + 'yaml_parser.h', +]) + +subdir('converter') +subdir('software_isp') diff --git a/spider-cam/libcamera/include/libcamera/internal/pipeline_handler.h b/spider-cam/libcamera/include/libcamera/internal/pipeline_handler.h new file mode 100644 index 0000000..746a34f --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/pipeline_handler.h @@ -0,0 +1,147 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2018, Google Inc. + * + * Pipeline handler infrastructure + */ + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include "libcamera/internal/ipa_proxy.h" + +namespace libcamera { + +class Camera; +class CameraConfiguration; +class CameraManager; +class DeviceEnumerator; +class DeviceMatch; +class FrameBuffer; +class MediaDevice; +class PipelineHandler; +class Request; + +class PipelineHandler : public std::enable_shared_from_this, + public Object +{ +public: + PipelineHandler(CameraManager *manager); + virtual ~PipelineHandler(); + + virtual bool match(DeviceEnumerator *enumerator) = 0; + MediaDevice *acquireMediaDevice(DeviceEnumerator *enumerator, + const DeviceMatch &dm); + + bool acquire(); + void release(Camera *camera); + + virtual std::unique_ptr generateConfiguration(Camera *camera, + Span roles) = 0; + virtual int configure(Camera *camera, CameraConfiguration *config) = 0; + + virtual int exportFrameBuffers(Camera *camera, Stream *stream, + std::vector> *buffers) = 0; + + virtual int start(Camera *camera, const ControlList *controls) = 0; + void stop(Camera *camera); + bool hasPendingRequests(const Camera *camera) const; + + void registerRequest(Request *request); + void queueRequest(Request *request); + + bool completeBuffer(Request *request, FrameBuffer *buffer); + void completeRequest(Request *request); + + std::string configurationFile(const std::string &subdir, + const std::string &name) const; + + const char *name() const { return name_; } + +protected: + void registerCamera(std::shared_ptr camera); + void hotplugMediaDevice(MediaDevice *media); + + virtual int queueRequestDevice(Camera *camera, Request *request) = 0; + virtual void stopDevice(Camera *camera) = 0; + + virtual void releaseDevice(Camera *camera); + + CameraManager *manager_; + +private: + void unlockMediaDevices(); + + void 
mediaDeviceDisconnected(MediaDevice *media); + virtual void disconnect(); + + void doQueueRequest(Request *request); + void doQueueRequests(); + + std::vector> mediaDevices_; + std::vector> cameras_; + + std::queue waitingRequests_; + + const char *name_; + + Mutex lock_; + unsigned int useCount_ LIBCAMERA_TSA_GUARDED_BY(lock_); + + friend class PipelineHandlerFactoryBase; +}; + +class PipelineHandlerFactoryBase +{ +public: + PipelineHandlerFactoryBase(const char *name); + virtual ~PipelineHandlerFactoryBase() = default; + + std::shared_ptr create(CameraManager *manager) const; + + const std::string &name() const { return name_; } + + static std::vector &factories(); + static const PipelineHandlerFactoryBase *getFactoryByName(const std::string &name); + +private: + static void registerType(PipelineHandlerFactoryBase *factory); + + virtual std::unique_ptr + createInstance(CameraManager *manager) const = 0; + + std::string name_; +}; + +template +class PipelineHandlerFactory final : public PipelineHandlerFactoryBase +{ +public: + PipelineHandlerFactory(const char *name) + : PipelineHandlerFactoryBase(name) + { + } + + std::unique_ptr + createInstance(CameraManager *manager) const override + { + return std::make_unique<_PipelineHandler>(manager); + } +}; + +#define REGISTER_PIPELINE_HANDLER(handler, name) \ + static PipelineHandlerFactory global_##handler##Factory(name); + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/process.h b/spider-cam/libcamera/include/libcamera/internal/process.h new file mode 100644 index 0000000..b1d07a5 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/process.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * Process object + */ + +#pragma once + +#include +#include +#include + +#include +#include + +namespace libcamera { + +class EventNotifier; + +class Process final +{ +public: + enum ExitStatus { + NotExited, + NormalExit, + SignalExit, + }; + + Process(); + ~Process(); + + int start(const std::string &path, + const std::vector &args = std::vector(), + const std::vector &fds = std::vector()); + + ExitStatus exitStatus() const { return exitStatus_; } + int exitCode() const { return exitCode_; } + + void kill(); + + Signal finished; + +private: + void closeAllFdsExcept(const std::vector &fds); + int isolate(); + void died(int wstatus); + + pid_t pid_; + bool running_; + enum ExitStatus exitStatus_; + int exitCode_; + + friend class ProcessManager; +}; + +class ProcessManager +{ +public: + ProcessManager(); + ~ProcessManager(); + + void registerProcess(Process *proc); + + static ProcessManager *instance(); + + int writePipe() const; + + const struct sigaction &oldsa() const; + +private: + static ProcessManager *self_; + + void sighandler(); + + std::list processes_; + + struct sigaction oldsa_; + + EventNotifier *sigEvent_; + UniqueFD pipe_[2]; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/pub_key.h b/spider-cam/libcamera/include/libcamera/internal/pub_key.h new file mode 100644 index 0000000..c8cc04c --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/pub_key.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Google Inc. 
+ *
+ * Public key signature verification
+ */
+
+#pragma once
+
+#include <stdint.h>
+
+#include <libcamera/base/span.h>
+
+#if HAVE_CRYPTO
+struct evp_pkey_st;
+#elif HAVE_GNUTLS
+struct gnutls_pubkey_st;
+#endif
+
+namespace libcamera {
+
+class PubKey
+{
+public:
+	PubKey(Span<const uint8_t> key);
+	~PubKey();
+
+	bool isValid() const { return valid_; }
+	bool verify(Span<const uint8_t> data, Span<const uint8_t> sig) const;
+
+private:
+	bool valid_;
+#if HAVE_CRYPTO
+	struct evp_pkey_st *pubkey_;
+#elif HAVE_GNUTLS
+	struct gnutls_pubkey_st *pubkey_;
+#endif
+};
+
+} /* namespace libcamera */
diff --git a/spider-cam/libcamera/include/libcamera/internal/request.h b/spider-cam/libcamera/include/libcamera/internal/request.h
new file mode 100644
index 0000000..f5d9806
--- /dev/null
+++ b/spider-cam/libcamera/include/libcamera/internal/request.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * Request class private data
+ */
+
+#pragma once
+
+#include <chrono>
+#include <map>
+#include <memory>
+
+#include <libcamera/base/event_notifier.h>
+#include <libcamera/base/timer.h>
+
+#include <libcamera/request.h>
+
+using namespace std::chrono_literals;
+
+namespace libcamera {
+
+class Camera;
+class FrameBuffer;
+
+class Request::Private : public Extensible::Private
+{
+	LIBCAMERA_DECLARE_PUBLIC(Request)
+
+public:
+	Private(Camera *camera);
+	~Private();
+
+	Camera *camera() const { return camera_; }
+	bool hasPendingBuffers() const;
+
+	bool completeBuffer(FrameBuffer *buffer);
+	void complete();
+	void cancel();
+	void reset();
+
+	void prepare(std::chrono::milliseconds timeout = 0ms);
+	Signal<> prepared;
+
+private:
+	friend class PipelineHandler;
+	friend std::ostream &operator<<(std::ostream &out, const Request &r);
+
+	void doCancelRequest();
+	void emitPrepareCompleted();
+	void notifierActivated(FrameBuffer *buffer);
+	void timeout();
+
+	Camera *camera_;
+	bool cancelled_;
+	uint32_t sequence_ = 0;
+	bool prepared_ = false;
+
+	std::unordered_set<FrameBuffer *> pending_;
+	std::map<FrameBuffer *, std::unique_ptr<EventNotifier>> notifiers_;
+	std::unique_ptr<Timer> timer_;
+};
+
+} /* namespace libcamera */
diff --git a/spider-cam/libcamera/include/libcamera/internal/shared_mem_object.h b/spider-cam/libcamera/include/libcamera/internal/shared_mem_object.h
new file mode 100644
index 0000000..2ab0189
--- /dev/null
+++ b/spider-cam/libcamera/include/libcamera/internal/shared_mem_object.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023 Raspberry Pi Ltd
+ * Copyright (C) 2024 Andrei Konovalov
+ * Copyright (C) 2024 Dennis Bonke
+ *
+ * Helpers for shared memory allocations
+ */
+#pragma once
+
+#include <cstddef>
+#include <new>
+#include <string>
+#include <sys/mman.h>
+#include <type_traits>
+#include <utility>
+
+#include <libcamera/base/class.h>
+#include <libcamera/base/shared_fd.h>
+#include <libcamera/base/span.h>
+
+namespace libcamera {
+
+class SharedMem
+{
+public:
+	SharedMem();
+
+	SharedMem(const std::string &name, std::size_t size);
+	SharedMem(SharedMem &&rhs);
+
+	virtual ~SharedMem();
+
+	SharedMem &operator=(SharedMem &&rhs);
+
+	const SharedFD &fd() const
+	{
+		return fd_;
+	}
+
+	Span<uint8_t> mem() const
+	{
+		return mem_;
+	}
+
+	explicit operator bool() const
+	{
+		return !mem_.empty();
+	}
+
+private:
+	LIBCAMERA_DISABLE_COPY(SharedMem)
+
+	SharedFD fd_;
+
+	Span<uint8_t> mem_;
+};
+
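+/*
+ * Editor's illustration (not part of the upstream header): SharedMemObject
+ * below placement-constructs a standard-layout T inside the mapping owned
+ * by SharedMem, e.g.:
+ *
+ *   struct Params { uint32_t exposure; };
+ *   SharedMemObject<Params> params("/libcamera-params", Params{ 10 });
+ *   if (params)
+ *       params->exposure = 20;
+ *
+ * params.fd() can then be shared with another process.
+ */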
+template<class T, typename = std::enable_if_t<std::is_standard_layout<T>::value>>
+class SharedMemObject : public SharedMem
+{
+public:
+	static constexpr std::size_t kSize = sizeof(T);
+
+	SharedMemObject()
+		: SharedMem(), obj_(nullptr)
+	{
+	}
+
+	template<class... Args>
+	SharedMemObject(const std::string &name, Args &&...args)
+		: SharedMem(name, kSize), obj_(nullptr)
+	{
+		if (mem().empty())
+			return;
+
+		obj_ = new (mem().data()) T(std::forward<Args>(args)...);
+	}
+
+	SharedMemObject(SharedMemObject<T> &&rhs)
+		: SharedMem(std::move(rhs))
+	{
+		this->obj_ = rhs.obj_;
+		rhs.obj_ = nullptr;
+	}
+
+	~SharedMemObject()
+	{
+		if (obj_)
+			obj_->~T();
+	}
+
+	SharedMemObject<T> &operator=(SharedMemObject<T> &&rhs)
+	{
+		SharedMem::operator=(std::move(rhs));
+		this->obj_ = rhs.obj_;
+		rhs.obj_ = nullptr;
+		return *this;
+	}
+
+	T *operator->()
+	{
+		return obj_;
+	}
+
+	const T *operator->() const
+	{
+		return obj_;
+	}
+
+	T &operator*()
+	{
+		return *obj_;
+	}
+
+	const T &operator*() const
+	{
+		return *obj_;
+	}
+
+private:
+	LIBCAMERA_DISABLE_COPY(SharedMemObject)
+
+	T *obj_;
+};
+
+} /* namespace libcamera */
diff --git a/spider-cam/libcamera/include/libcamera/internal/software_isp/debayer_params.h b/spider-cam/libcamera/include/libcamera/internal/software_isp/debayer_params.h
new file mode 100644
index 0000000..7d8fdd4
--- /dev/null
+++ b/spider-cam/libcamera/include/libcamera/internal/software_isp/debayer_params.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, 2024 Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * DebayerParams header
+ */
+
+#pragma once
+
+#include <array>
+#include <stdint.h>
+
+namespace libcamera {
+
+struct DebayerParams {
+	static constexpr unsigned int kRGBLookupSize = 256;
+
+	using ColorLookupTable = std::array<uint8_t, kRGBLookupSize>;
+
+	ColorLookupTable red;
+	ColorLookupTable green;
+	ColorLookupTable blue;
+};
+
+} /* namespace libcamera */
diff --git a/spider-cam/libcamera/include/libcamera/internal/software_isp/meson.build b/spider-cam/libcamera/include/libcamera/internal/software_isp/meson.build
new file mode 100644
index 0000000..508dddd
--- /dev/null
+++ b/spider-cam/libcamera/include/libcamera/internal/software_isp/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_headers += files([
+    'debayer_params.h',
+    'software_isp.h',
+    'swisp_stats.h',
+])
diff --git a/spider-cam/libcamera/include/libcamera/internal/software_isp/software_isp.h b/spider-cam/libcamera/include/libcamera/internal/software_isp/software_isp.h
new file mode 100644
index 0000000..f8e0000
--- /dev/null
+++ b/spider-cam/libcamera/include/libcamera/internal/software_isp/software_isp.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ *
+ * Simple software ISP implementation
+ */
+
+#pragma once
+
+#include <functional>
+#include <initializer_list>
+#include <map>
+#include <memory>
+#include <string>
+#include <tuple>
+#include <vector>
+
+#include <libcamera/base/class.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/signal.h>
+#include <libcamera/base/thread.h>
+
+#include <libcamera/geometry.h>
+#include <libcamera/pixel_format.h>
+
+#include <libcamera/ipa/soft_ipa_interface.h>
+#include <libcamera/ipa/soft_ipa_proxy.h>
+
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/dma_buf_allocator.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/shared_mem_object.h"
+#include "libcamera/internal/software_isp/debayer_params.h"
+
+namespace libcamera {
+
+class DebayerCpu;
+class FrameBuffer;
+class PixelFormat;
+class Stream;
+struct StreamConfiguration;
+
+LOG_DECLARE_CATEGORY(SoftwareIsp)
+
+class SoftwareIsp
+{
+public:
+	SoftwareIsp(PipelineHandler *pipe, const CameraSensor *sensor);
+	~SoftwareIsp();
+
+	int loadConfiguration([[maybe_unused]] const std::string &filename) { return 0; }
+
+	bool isValid() const;
+
+	std::vector<PixelFormat> formats(PixelFormat input);
+
+	SizeRange sizes(PixelFormat inputFormat, const Size &inputSize);
+
+	std::tuple<unsigned int, unsigned int>
+	strideAndFrameSize(const PixelFormat &outputFormat, const Size &size);
+
+	int configure(const StreamConfiguration &inputCfg,
+		      const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs,
+		      const ControlInfoMap &sensorControls);
+
+	int exportBuffers(const Stream *stream, unsigned int count,
+			  std::vector<std::unique_ptr<FrameBuffer>> *buffers);
+
+	void
processStats(const ControlList &sensorControls); + + int start(); + void stop(); + + int queueBuffers(FrameBuffer *input, + const std::map &outputs); + + void process(FrameBuffer *input, FrameBuffer *output); + + Signal inputBufferReady; + Signal outputBufferReady; + Signal<> ispStatsReady; + Signal setSensorControls; + +private: + void saveIspParams(); + void setSensorCtrls(const ControlList &sensorControls); + void statsReady(); + void inputReady(FrameBuffer *input); + void outputReady(FrameBuffer *output); + + std::unique_ptr debayer_; + Thread ispWorkerThread_; + SharedMemObject sharedParams_; + DebayerParams debayerParams_; + DmaBufAllocator dmaHeap_; + + std::unique_ptr ipa_; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/software_isp/swisp_stats.h b/spider-cam/libcamera/include/libcamera/internal/software_isp/swisp_stats.h new file mode 100644 index 0000000..ae11f11 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/software_isp/swisp_stats.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2023, Linaro Ltd + * + * Statistics data format used by the software ISP and software IPA + */ + +#pragma once + +#include +#include + +namespace libcamera { + +/** + * \brief Struct that holds the statistics for the Software ISP + * + * The struct value types are large enough to not overflow. + * Should they still overflow for some reason, no check is performed and they + * wrap around. + */ +struct SwIspStats { + /** + * \brief Holds the sum of all sampled red pixels + */ + uint64_t sumR_; + /** + * \brief Holds the sum of all sampled green pixels + */ + uint64_t sumG_; + /** + * \brief Holds the sum of all sampled blue pixels + */ + uint64_t sumB_; + /** + * \brief Number of bins in the yHistogram + */ + static constexpr unsigned int kYHistogramSize = 64; + /** + * \brief Type of the histogram. + */ + using Histogram = std::array; + /** + * \brief A histogram of luminance values + */ + Histogram yHistogram; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/source_paths.h b/spider-cam/libcamera/include/libcamera/internal/source_paths.h new file mode 100644 index 0000000..14e6471 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/source_paths.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2021, Google Inc. + * + * Identify libcamera source and build paths + */ + +#pragma once + +#include + +namespace libcamera::utils { + +std::string libcameraBuildPath(); +std::string libcameraSourcePath(); + +} /* namespace libcamera::utils */ diff --git a/spider-cam/libcamera/include/libcamera/internal/sysfs.h b/spider-cam/libcamera/include/libcamera/internal/sysfs.h new file mode 100644 index 0000000..aca60fb --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/sysfs.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Google Inc. 
+ * + * Miscellaneous utility functions to access sysfs + */ + +#pragma once + +#include + +namespace libcamera { + +namespace sysfs { + +std::string charDevPath(const std::string &deviceNode); + +std::string firmwareNodePath(const std::string &device); + +} /* namespace sysfs */ + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/tracepoints.h.in b/spider-cam/libcamera/include/libcamera/internal/tracepoints.h.in new file mode 100644 index 0000000..f096209 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/tracepoints.h.in @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) {{year}}, Google Inc. + * + * Tracepoints with lttng + * + * This file is auto-generated. Do not edit. + */ +#ifndef __LIBCAMERA_INTERNAL_TRACEPOINTS_H__ +#define __LIBCAMERA_INTERNAL_TRACEPOINTS_H__ + +#if HAVE_TRACING +#define LIBCAMERA_TRACEPOINT(...) tracepoint(libcamera, __VA_ARGS__) + +#define LIBCAMERA_TRACEPOINT_IPA_BEGIN(pipe, func) \ +tracepoint(libcamera, ipa_call_begin, #pipe, #func) + +#define LIBCAMERA_TRACEPOINT_IPA_END(pipe, func) \ +tracepoint(libcamera, ipa_call_end, #pipe, #func) + +#else + +namespace { + +template +inline void unused([[maybe_unused]] Args&& ...args) +{ +} + +} /* namespace */ + +#define LIBCAMERA_TRACEPOINT(category, ...) unused(__VA_ARGS__) + +#define LIBCAMERA_TRACEPOINT_IPA_BEGIN(pipe, func) +#define LIBCAMERA_TRACEPOINT_IPA_END(pipe, func) + +#endif /* HAVE_TRACING */ + +#endif /* __LIBCAMERA_INTERNAL_TRACEPOINTS_H__ */ + + +#if HAVE_TRACING + +#undef TRACEPOINT_PROVIDER +#define TRACEPOINT_PROVIDER libcamera + +#undef TRACEPOINT_INCLUDE +#define TRACEPOINT_INCLUDE "{{path}}" + +#if !defined(INCLUDE_LIBCAMERA_INTERNAL_TRACEPOINTS_TP_H) || defined(TRACEPOINT_HEADER_MULTI_READ) +#define INCLUDE_LIBCAMERA_INTERNAL_TRACEPOINTS_TP_H + +#include + +{{source}} + +#endif /* INCLUDE_LIBCAMERA_INTERNAL_TRACEPOINTS_TP_H */ + +#include + +#endif /* HAVE_TRACING */ diff --git a/spider-cam/libcamera/include/libcamera/internal/tracepoints/buffer_enums.tp b/spider-cam/libcamera/include/libcamera/internal/tracepoints/buffer_enums.tp new file mode 100644 index 0000000..c5a9325 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/tracepoints/buffer_enums.tp @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Google Inc. 
+ * + * buffer_enums.tp - Tracepoint definition for enums in the buffer class + */ + +TRACEPOINT_ENUM( + libcamera, + buffer_status, + TP_ENUM_VALUES( + ctf_enum_value("FrameSuccess", 0) + ctf_enum_value("FrameError", 1) + ctf_enum_value("FrameCancelled", 2) + ) +) diff --git a/spider-cam/libcamera/include/libcamera/internal/tracepoints/meson.build b/spider-cam/libcamera/include/libcamera/internal/tracepoints/meson.build new file mode 100644 index 0000000..d9b2fca --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/tracepoints/meson.build @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: CC0-1.0 + +# enum files must go first +tracepoint_files = files([ + 'buffer_enums.tp', + 'request_enums.tp', +]) + +tracepoint_files += files([ + 'pipeline.tp', + 'request.tp', +]) diff --git a/spider-cam/libcamera/include/libcamera/internal/tracepoints/pipeline.tp b/spider-cam/libcamera/include/libcamera/internal/tracepoints/pipeline.tp new file mode 100644 index 0000000..950aa91 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/tracepoints/pipeline.tp @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Google Inc. + * + * pipeline.tp - Tracepoints for pipelines + */ + +TRACEPOINT_EVENT( + libcamera, + ipa_call_begin, + TP_ARGS( + const char *, pipe, + const char *, func + ), + TP_FIELDS( + ctf_string(pipeline_name, pipe) + ctf_string(function_name, func) + ) +) + +TRACEPOINT_EVENT( + libcamera, + ipa_call_end, + TP_ARGS( + const char *, pipe, + const char *, func + ), + TP_FIELDS( + ctf_string(pipeline_name, pipe) + ctf_string(function_name, func) + ) +) diff --git a/spider-cam/libcamera/include/libcamera/internal/tracepoints/request.tp b/spider-cam/libcamera/include/libcamera/internal/tracepoints/request.tp new file mode 100644 index 0000000..4f367e9 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/tracepoints/request.tp @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Google Inc. 
+ * + * request.tp - Tracepoints for the request object + */ + +#include + +#include "libcamera/internal/request.h" + +TRACEPOINT_EVENT_CLASS( + libcamera, + request, + TP_ARGS( + libcamera::Request *, req + ), + TP_FIELDS( + ctf_integer_hex(uintptr_t, request, reinterpret_cast(req)) + ctf_integer(uint64_t, cookie, req->cookie()) + ctf_enum(libcamera, request_status, uint32_t, status, req->status()) + ) +) + +TRACEPOINT_EVENT_INSTANCE( + libcamera, + request, + request_construct, + TP_ARGS( + libcamera::Request *, req + ) +) + +TRACEPOINT_EVENT_INSTANCE( + libcamera, + request, + request_destroy, + TP_ARGS( + libcamera::Request *, req + ) +) + +TRACEPOINT_EVENT_INSTANCE( + libcamera, + request, + request_reuse, + TP_ARGS( + libcamera::Request *, req + ) +) + +TRACEPOINT_EVENT_INSTANCE( + libcamera, + request, + request_queue, + TP_ARGS( + libcamera::Request *, req + ) +) + +TRACEPOINT_EVENT_INSTANCE( + libcamera, + request, + request_device_queue, + TP_ARGS( + libcamera::Request *, req + ) +) + +TRACEPOINT_EVENT_INSTANCE( + libcamera, + request, + request_complete, + TP_ARGS( + libcamera::Request::Private *, req + ) +) + +TRACEPOINT_EVENT_INSTANCE( + libcamera, + request, + request_cancel, + TP_ARGS( + libcamera::Request::Private *, req + ) +) + +TRACEPOINT_EVENT( + libcamera, + request_complete_buffer, + TP_ARGS( + libcamera::Request::Private *, req, + libcamera::FrameBuffer *, buf + ), + TP_FIELDS( + ctf_integer_hex(uintptr_t, request, reinterpret_cast(req)) + ctf_integer(uint64_t, cookie, req->_o()->cookie()) + ctf_integer(int, status, req->_o()->status()) + ctf_integer_hex(uintptr_t, buffer, reinterpret_cast(buf)) + ctf_enum(libcamera, buffer_status, uint32_t, buf_status, buf->metadata().status) + ) +) diff --git a/spider-cam/libcamera/include/libcamera/internal/tracepoints/request_enums.tp b/spider-cam/libcamera/include/libcamera/internal/tracepoints/request_enums.tp new file mode 100644 index 0000000..bcbd1aa --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/tracepoints/request_enums.tp @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Google Inc. + * + * request_enums.tp - Tracepoint definition for enums in the request class + */ + +TRACEPOINT_ENUM( + libcamera, + request_status, + TP_ENUM_VALUES( + ctf_enum_value("RequestPending", 0) + ctf_enum_value("RequestComplete", 1) + ctf_enum_value("RequestCancelled", 2) + ) +) diff --git a/spider-cam/libcamera/include/libcamera/internal/v4l2_device.h b/spider-cam/libcamera/include/libcamera/internal/v4l2_device.h new file mode 100644 index 0000000..f5aa502 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/v4l2_device.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. 
+ * + * Common base for V4L2 video devices and subdevices + */ + +#pragma once + +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +#include +#include + +#include "libcamera/internal/formats.h" + +namespace libcamera { + +class EventNotifier; + +class V4L2Device : protected Loggable +{ +public: + void close(); + bool isOpen() const { return fd_.isValid(); } + + const ControlInfoMap &controls() const { return controls_; } + + ControlList getControls(const std::vector &ids); + int setControls(ControlList *ctrls); + + const struct v4l2_query_ext_ctrl *controlInfo(uint32_t id) const; + + const std::string &deviceNode() const { return deviceNode_; } + std::string devicePath() const; + + int setFrameStartEnabled(bool enable); + Signal frameStart; + + void updateControlInfo(); + +protected: + V4L2Device(const std::string &deviceNode); + ~V4L2Device(); + + int open(unsigned int flags); + int setFd(UniqueFD fd); + + int ioctl(unsigned long request, void *argp); + + int fd() const { return fd_.get(); } + + template + static std::optional toColorSpace(const T &v4l2Format, + PixelFormatInfo::ColourEncoding colourEncoding); + + template + static int fromColorSpace(const std::optional &colorSpace, T &v4l2Format); + +private: + static ControlType v4l2CtrlType(uint32_t ctrlType); + static std::unique_ptr v4l2ControlId(const v4l2_query_ext_ctrl &ctrl); + std::optional v4l2ControlInfo(const v4l2_query_ext_ctrl &ctrl); + std::optional v4l2MenuControlInfo(const v4l2_query_ext_ctrl &ctrl); + + void listControls(); + void updateControls(ControlList *ctrls, + Span v4l2Ctrls); + + void eventAvailable(); + + std::map controlInfo_; + std::vector> controlIds_; + ControlIdMap controlIdMap_; + ControlInfoMap controls_; + std::string deviceNode_; + UniqueFD fd_; + + EventNotifier *fdEventNotifier_; + bool frameStartEnabled_; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/v4l2_pixelformat.h b/spider-cam/libcamera/include/libcamera/internal/v4l2_pixelformat.h new file mode 100644 index 0000000..c836346 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/v4l2_pixelformat.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. 
+ * Copyright (C) 2020, Raspberry Pi Ltd + * + * V4L2 Pixel Format + */ + +#pragma once + +#include +#include +#include +#include +#include + +#include + +#include + +namespace libcamera { + +class V4L2PixelFormat +{ +public: + struct Info { + PixelFormat format; + const char *description; + }; + + V4L2PixelFormat() + : fourcc_(0) + { + } + + explicit V4L2PixelFormat(uint32_t fourcc) + : fourcc_(fourcc) + { + } + + bool isValid() const { return fourcc_ != 0; } + uint32_t fourcc() const { return fourcc_; } + operator uint32_t() const { return fourcc_; } + + std::string toString() const; + const char *description() const; + + PixelFormat toPixelFormat(bool warn = true) const; + static const std::vector & + fromPixelFormat(const PixelFormat &pixelFormat); + +private: + uint32_t fourcc_; +}; + +std::ostream &operator<<(std::ostream &out, const V4L2PixelFormat &f); + +} /* namespace libcamera */ + +namespace std { + +template<> +struct hash { + size_t operator()(libcamera::V4L2PixelFormat const &format) const noexcept + { + return format.fourcc(); + } +}; + +} /* namespace std */ diff --git a/spider-cam/libcamera/include/libcamera/internal/v4l2_subdevice.h b/spider-cam/libcamera/include/libcamera/internal/v4l2_subdevice.h new file mode 100644 index 0000000..194382f --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/v4l2_subdevice.h @@ -0,0 +1,199 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * V4L2 Subdevice + */ + +#pragma once + +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include +#include + +#include "libcamera/internal/formats.h" +#include "libcamera/internal/media_object.h" +#include "libcamera/internal/v4l2_device.h" + +namespace libcamera { + +class MediaDevice; + +class MediaBusFormatInfo +{ +public: + enum class Type { + Image, + Metadata, + EmbeddedData, + }; + + bool isValid() const { return code != 0; } + + static const MediaBusFormatInfo &info(uint32_t code); + + const char *name; + uint32_t code; + Type type; + unsigned int bitsPerPixel; + PixelFormatInfo::ColourEncoding colourEncoding; +}; + +struct V4L2SubdeviceCapability final : v4l2_subdev_capability { + bool isReadOnly() const + { + return capabilities & V4L2_SUBDEV_CAP_RO_SUBDEV; + } + bool hasStreams() const + { + return capabilities & V4L2_SUBDEV_CAP_STREAMS; + } +}; + +struct V4L2SubdeviceFormat { + uint32_t code; + Size size; + std::optional colorSpace; + + const std::string toString() const; +}; + +std::ostream &operator<<(std::ostream &out, const V4L2SubdeviceFormat &f); + +class V4L2Subdevice : public V4L2Device +{ +public: + using Formats = std::map>; + + enum Whence { + TryFormat = V4L2_SUBDEV_FORMAT_TRY, + ActiveFormat = V4L2_SUBDEV_FORMAT_ACTIVE, + }; + + struct Stream { + Stream() + : pad(0), stream(0) + { + } + + Stream(unsigned int p, unsigned int s) + : pad(p), stream(s) + { + } + + unsigned int pad; + unsigned int stream; + }; + + struct Route { + Route() + : flags(0) + { + } + + Route(const Stream &snk, const Stream &src, uint32_t f) + : sink(snk), source(src), flags(f) + { + } + + Stream sink; + Stream source; + uint32_t flags; + }; + + using Routing = std::vector; + + explicit V4L2Subdevice(const MediaEntity *entity); + ~V4L2Subdevice(); + + int open(); + + const MediaEntity *entity() const { return entity_; } + + int getSelection(const Stream &stream, unsigned int target, + Rectangle *rect); + int getSelection(unsigned int pad, unsigned int target, Rectangle *rect) + { + return getSelection({ 
pad, 0 }, target, rect); + } + int setSelection(const Stream &stream, unsigned int target, + Rectangle *rect); + int setSelection(unsigned int pad, unsigned int target, Rectangle *rect) + { + return setSelection({ pad, 0 }, target, rect); + } + + Formats formats(const Stream &stream); + Formats formats(unsigned int pad) + { + return formats({ pad, 0 }); + } + + int getFormat(const Stream &stream, V4L2SubdeviceFormat *format, + Whence whence = ActiveFormat); + int getFormat(unsigned int pad, V4L2SubdeviceFormat *format, + Whence whence = ActiveFormat) + { + return getFormat({ pad, 0 }, format, whence); + } + int setFormat(const Stream &stream, V4L2SubdeviceFormat *format, + Whence whence = ActiveFormat); + int setFormat(unsigned int pad, V4L2SubdeviceFormat *format, + Whence whence = ActiveFormat) + { + return setFormat({ pad, 0 }, format, whence); + } + + int getRouting(Routing *routing, Whence whence = ActiveFormat); + int setRouting(Routing *routing, Whence whence = ActiveFormat); + + const std::string &model(); + const V4L2SubdeviceCapability &caps() const { return caps_; } + + static std::unique_ptr + fromEntityName(const MediaDevice *media, const std::string &entity); + +protected: + std::string logPrefix() const override; + +private: + LIBCAMERA_DISABLE_COPY(V4L2Subdevice) + + std::optional + toColorSpace(const v4l2_mbus_framefmt &format) const; + + std::vector enumPadCodes(const Stream &stream); + std::vector enumPadSizes(const Stream &stream, + unsigned int code); + + int getRoutingLegacy(Routing *routing, Whence whence); + int setRoutingLegacy(Routing *routing, Whence whence); + + const MediaEntity *entity_; + + std::string model_; + struct V4L2SubdeviceCapability caps_; +}; + +bool operator==(const V4L2Subdevice::Stream &lhs, const V4L2Subdevice::Stream &rhs); +static inline bool operator!=(const V4L2Subdevice::Stream &lhs, + const V4L2Subdevice::Stream &rhs) +{ + return !(lhs == rhs); +} + +std::ostream &operator<<(std::ostream &out, const V4L2Subdevice::Stream &stream); +std::ostream &operator<<(std::ostream &out, const V4L2Subdevice::Route &route); +std::ostream &operator<<(std::ostream &out, const V4L2Subdevice::Routing &routing); + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/internal/v4l2_videodevice.h b/spider-cam/libcamera/include/libcamera/internal/v4l2_videodevice.h new file mode 100644 index 0000000..9057be0 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/v4l2_videodevice.h @@ -0,0 +1,313 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * V4L2 Video Device + */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "libcamera/internal/formats.h" +#include "libcamera/internal/v4l2_device.h" +#include "libcamera/internal/v4l2_pixelformat.h" + +namespace libcamera { + +class EventNotifier; +class MediaDevice; +class MediaEntity; + +struct V4L2Capability final : v4l2_capability { + const char *driver() const + { + return reinterpret_cast(v4l2_capability::driver); + } + const char *card() const + { + return reinterpret_cast(v4l2_capability::card); + } + const char *bus_info() const + { + return reinterpret_cast(v4l2_capability::bus_info); + } + unsigned int device_caps() const + { + return capabilities & V4L2_CAP_DEVICE_CAPS + ? 
v4l2_capability::device_caps + : v4l2_capability::capabilities; + } + bool isMultiplanar() const + { + return device_caps() & (V4L2_CAP_VIDEO_CAPTURE_MPLANE | + V4L2_CAP_VIDEO_OUTPUT_MPLANE | + V4L2_CAP_VIDEO_M2M_MPLANE); + } + bool isCapture() const + { + return device_caps() & (V4L2_CAP_VIDEO_CAPTURE | + V4L2_CAP_VIDEO_CAPTURE_MPLANE | + V4L2_CAP_META_CAPTURE); + } + bool isOutput() const + { + return device_caps() & (V4L2_CAP_VIDEO_OUTPUT | + V4L2_CAP_VIDEO_OUTPUT_MPLANE | + V4L2_CAP_META_OUTPUT); + } + bool isVideo() const + { + return device_caps() & (V4L2_CAP_VIDEO_CAPTURE | + V4L2_CAP_VIDEO_CAPTURE_MPLANE | + V4L2_CAP_VIDEO_OUTPUT | + V4L2_CAP_VIDEO_OUTPUT_MPLANE); + } + bool isM2M() const + { + return device_caps() & (V4L2_CAP_VIDEO_M2M | + V4L2_CAP_VIDEO_M2M_MPLANE); + } + bool isMeta() const + { + return device_caps() & (V4L2_CAP_META_CAPTURE | + V4L2_CAP_META_OUTPUT); + } + bool isVideoCapture() const + { + return isVideo() && isCapture(); + } + bool isVideoOutput() const + { + return isVideo() && isOutput(); + } + bool isMetaCapture() const + { + return isMeta() && isCapture(); + } + bool isMetaOutput() const + { + return isMeta() && isOutput(); + } + bool hasStreaming() const + { + return device_caps() & V4L2_CAP_STREAMING; + } + bool hasMediaController() const + { + return device_caps() & V4L2_CAP_IO_MC; + } +}; + +class V4L2BufferCache +{ +public: + V4L2BufferCache(unsigned int numEntries); + V4L2BufferCache(const std::vector> &buffers); + ~V4L2BufferCache(); + + bool isEmpty() const; + int get(const FrameBuffer &buffer); + void put(unsigned int index); + +private: + class Entry + { + public: + Entry(); + Entry(bool free, uint64_t lastUsed, const FrameBuffer &buffer); + + bool operator==(const FrameBuffer &buffer) const; + + bool free_; + uint64_t lastUsed_; + + private: + struct Plane { + Plane(const FrameBuffer::Plane &plane) + : fd(plane.fd.get()), length(plane.length) + { + } + + int fd; + unsigned int length; + }; + + std::vector planes_; + }; + + std::atomic lastUsedCounter_; + std::vector cache_; + /* \todo Expose the miss counter through an instrumentation API. 
*/ + unsigned int missCounter_; +}; + +class V4L2DeviceFormat +{ +public: + struct Plane { + uint32_t size = 0; + uint32_t bpl = 0; + }; + + V4L2PixelFormat fourcc; + Size size; + std::optional colorSpace; + + std::array planes; + unsigned int planesCount = 0; + + const std::string toString() const; +}; + +std::ostream &operator<<(std::ostream &out, const V4L2DeviceFormat &f); + +class V4L2VideoDevice : public V4L2Device +{ +public: + using Formats = std::map>; + + explicit V4L2VideoDevice(const std::string &deviceNode); + explicit V4L2VideoDevice(const MediaEntity *entity); + ~V4L2VideoDevice(); + + int open(); + int open(SharedFD handle, enum v4l2_buf_type type); + void close(); + + const char *driverName() const { return caps_.driver(); } + const char *deviceName() const { return caps_.card(); } + const char *busName() const { return caps_.bus_info(); } + + const V4L2Capability &caps() const { return caps_; } + + int getFormat(V4L2DeviceFormat *format); + int tryFormat(V4L2DeviceFormat *format); + int setFormat(V4L2DeviceFormat *format); + Formats formats(uint32_t code = 0); + + int setSelection(unsigned int target, Rectangle *rect); + + int allocateBuffers(unsigned int count, + std::vector> *buffers); + int exportBuffers(unsigned int count, + std::vector> *buffers); + int importBuffers(unsigned int count); + int releaseBuffers(); + + int queueBuffer(FrameBuffer *buffer); + Signal bufferReady; + + int streamOn(); + int streamOff(); + + void setDequeueTimeout(utils::Duration timeout); + Signal<> dequeueTimeout; + + static std::unique_ptr + fromEntityName(const MediaDevice *media, const std::string &entity); + + V4L2PixelFormat toV4L2PixelFormat(const PixelFormat &pixelFormat) const; + +protected: + std::string logPrefix() const override; + +private: + LIBCAMERA_DISABLE_COPY(V4L2VideoDevice) + + enum class State { + Streaming, + Stopping, + Stopped, + }; + + int initFormats(); + + int getFormatMeta(V4L2DeviceFormat *format); + int trySetFormatMeta(V4L2DeviceFormat *format, bool set); + + int getFormatMultiplane(V4L2DeviceFormat *format); + int trySetFormatMultiplane(V4L2DeviceFormat *format, bool set); + + int getFormatSingleplane(V4L2DeviceFormat *format); + int trySetFormatSingleplane(V4L2DeviceFormat *format, bool set); + + std::vector enumPixelformats(uint32_t code); + std::vector enumSizes(V4L2PixelFormat pixelFormat); + + int requestBuffers(unsigned int count, enum v4l2_memory memoryType); + int createBuffers(unsigned int count, + std::vector> *buffers); + std::unique_ptr createBuffer(unsigned int index); + UniqueFD exportDmabufFd(unsigned int index, unsigned int plane); + + void bufferAvailable(); + FrameBuffer *dequeueBuffer(); + + void watchdogExpired(); + + template + static std::optional toColorSpace(const T &v4l2Format); + + V4L2Capability caps_; + V4L2DeviceFormat format_; + const PixelFormatInfo *formatInfo_; + std::unordered_set pixelFormats_; + + enum v4l2_buf_type bufferType_; + enum v4l2_memory memoryType_; + + V4L2BufferCache *cache_; + std::map queuedBuffers_; + + EventNotifier *fdBufferNotifier_; + + State state_; + std::optional firstFrame_; + + Timer watchdog_; + utils::Duration watchdogDuration_; +}; + +class V4L2M2MDevice +{ +public: + V4L2M2MDevice(const std::string &deviceNode); + ~V4L2M2MDevice(); + + int open(); + void close(); + + V4L2VideoDevice *output() { return output_; } + V4L2VideoDevice *capture() { return capture_; } + +private: + std::string deviceNode_; + + V4L2VideoDevice *output_; + V4L2VideoDevice *capture_; +}; + +} /* namespace libcamera */ 
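[Editor's sketch, not part of the patch.] The V4L2VideoDevice API declared above is driven in a fairly rigid open/format/buffers/stream sequence. The fragment below illustrates that sequence using only member functions declared in this header; the device node "/dev/video0", the buffer count and the helper names captureSketch()/onBufferReady() are invented for the example, and a running libcamera event loop is assumed so the internal dequeue notifier can fire.

#include <memory>
#include <vector>

#include "libcamera/internal/v4l2_videodevice.h"

using namespace libcamera;

/* Invoked from the event loop for every dequeued buffer. */
static void onBufferReady(FrameBuffer *buffer)
{
	/* buffer->metadata() carries the frame status, sequence and timestamp. */
	(void)buffer;
}

static int captureSketch()
{
	V4L2VideoDevice video("/dev/video0"); /* hypothetical node */
	if (video.open() < 0)
		return -1;

	/* Read the current format back and re-apply it unchanged. */
	V4L2DeviceFormat format;
	video.getFormat(&format);
	if (video.setFormat(&format) < 0)
		return -1;

	/* Allocate driver-owned buffers and register the completion handler. */
	std::vector<std::unique_ptr<FrameBuffer>> buffers;
	if (video.allocateBuffers(4, &buffers) < 0)
		return -1;
	video.bufferReady.connect(&onBufferReady);

	for (const auto &buffer : buffers)
		video.queueBuffer(buffer.get());

	video.streamOn();
	/* ... the event loop runs here, onBufferReady() fires per frame ... */
	video.streamOff();

	video.releaseBuffers();
	video.close();
	return 0;
}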
diff --git a/spider-cam/libcamera/include/libcamera/internal/yaml_parser.h b/spider-cam/libcamera/include/libcamera/internal/yaml_parser.h new file mode 100644 index 0000000..e38a2df --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/internal/yaml_parser.h @@ -0,0 +1,234 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2022, Google Inc. + * + * libcamera YAML parsing helper + */ + +#pragma once + +#include +#include +#include +#include +#include + +#include + +#include + +namespace libcamera { + +class File; +class YamlParserContext; + +class YamlObject +{ +private: + struct Value { + Value(std::string &&k, std::unique_ptr &&v) + : key(std::move(k)), value(std::move(v)) + { + } + std::string key; + std::unique_ptr value; + }; + + using Container = std::vector; + using ListContainer = std::vector>; + +public: +#ifndef __DOXYGEN__ + template + class Iterator + { + public: + using difference_type = std::ptrdiff_t; + using iterator_category = std::forward_iterator_tag; + + Iterator(typename Container::const_iterator it) + : it_(it) + { + } + + Derived &operator++() + { + ++it_; + return *static_cast(this); + } + + Derived operator++(int) + { + Derived it = *static_cast(this); + it_++; + return it; + } + + friend bool operator==(const Iterator &a, const Iterator &b) + { + return a.it_ == b.it_; + } + + friend bool operator!=(const Iterator &a, const Iterator &b) + { + return a.it_ != b.it_; + } + + protected: + Container::const_iterator it_; + }; + + template + class Adapter + { + public: + Adapter(const Container &container) + : container_(container) + { + } + + Iterator begin() const + { + return Iterator{ container_.begin() }; + } + + Iterator end() const + { + return Iterator{ container_.end() }; + } + + protected: + const Container &container_; + }; + + class ListIterator : public Iterator + { + public: + using value_type = const YamlObject &; + using pointer = const YamlObject *; + using reference = value_type; + + value_type operator*() const + { + return *it_->value.get(); + } + + pointer operator->() const + { + return it_->value.get(); + } + }; + + class DictIterator : public Iterator + { + public: + using value_type = std::pair; + using pointer = value_type *; + using reference = value_type &; + + value_type operator*() const + { + return { it_->key, *it_->value.get() }; + } + }; + + class DictAdapter : public Adapter + { + public: + using key_type = std::string; + }; + + class ListAdapter : public Adapter + { + }; +#endif /* __DOXYGEN__ */ + + YamlObject(); + ~YamlObject(); + + bool isValue() const + { + return type_ == Type::Value; + } + bool isList() const + { + return type_ == Type::List; + } + bool isDictionary() const + { + return type_ == Type::Dictionary; + } + + std::size_t size() const; + + template + std::optional get() const + { + return Getter{}.get(*this); + } + + template + T get(U &&defaultValue) const + { + return get().value_or(std::forward(defaultValue)); + } + +#ifndef __DOXYGEN__ + template || + std::is_same_v || + std::is_same_v || + std::is_same_v || + std::is_same_v || + std::is_same_v || + std::is_same_v || + std::is_same_v || + std::is_same_v || + std::is_same_v || + std::is_same_v> * = nullptr> +#else + template +#endif + std::optional> getList() const; + + DictAdapter asDict() const { return DictAdapter{ list_ }; } + ListAdapter asList() const { return ListAdapter{ list_ }; } + + const YamlObject &operator[](std::size_t index) const; + + bool contains(const std::string &key) const; + const YamlObject &operator[](const 
std::string &key) const; + +private: + LIBCAMERA_DISABLE_COPY_AND_MOVE(YamlObject) + + template + friend struct Getter; + friend class YamlParserContext; + + enum class Type { + Dictionary, + List, + Value, + }; + + template + struct Getter { + std::optional get(const YamlObject &obj) const; + }; + + Type type_; + + std::string value_; + Container list_; + std::map dictionary_; +}; + +class YamlParser final +{ +public: + static std::unique_ptr parse(File &file); +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/ipa/core.mojom b/spider-cam/libcamera/include/libcamera/ipa/core.mojom new file mode 100644 index 0000000..bce7972 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/ipa/core.mojom @@ -0,0 +1,343 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ + +module libcamera; + +/** + * \file core_ipa_interface.h + * \brief libcamera structs for IPAs + */ + +/* + * Things that can be defined here (and in other mojom files): + * - consts + * - enums + * - structs + * + * Attributes: + * - skipHeader - allowed only for structs and enums in core.mojom + * - Do not generate a C++ definition for the structure or enum + * - Any type used in a mojom interface definition must have a corresponding + * definition in a mojom file for the code generator to accept it, except + * for types solely used as map/array members for which a definition is not + * required + * - This attribute allows defining a symbol for the code generator that + * corresponds to a libcamera type without duplicating its definition in the + * generated C++ headers + * - skipSerdes - structs only, and only in core.mojom + * - All types need a (de)serializer to be defined in order to be transported + * over IPC. The (de)serializer can be: + * - Manually implemented as a template specialization in + * ipa_data_serializer.cpp in the libcamera sources + * - Generated at build time for types defined in a mojom file + * - This attribute instructs the build system that a (de)serializer is + * available for the type and there's no need to generate one + * - hasFd - struct fields or empty structs only + * - Designate that this field or empty struct contains a SharedFD + * - scopedEnum - enum definitions + * - Designate that this enum should be an enum class, as opposed to a pure + * enum + * - flags - struct fields or function parameters that are enums + * - Designate that this enum type E should be Flags in the generated C++ + * code + * - For example, if a struct field is defined as `[flags] ErrorFlag f;` + * (where ErrorFlag is defined as an enum elsewhere in mojom), then the + * generated code for this field will be `Flags f` + * + * Rules: + * - If the type is defined in a libcamera C++ header *and* a (de)serializer is + * available then the type shall be declared as empty with both attributes + * associated and specified as: [skipHeader, skipSerdes] + * - Example: [skipHeader, skipSerdes] ControlList {}; + * - If the type is defined in libcamera but no (de)serializer is available + * then the type definition in the core.mojom file should have the + * [skipHeader] attribute only + * - A (de)serializer will be generated for the type + * - enums that are defined in a libcamera C++ header also fall in this + * category + * - If a type definition has [skipHeader], then the header where the type is + * defined must be included in ipa_interface.h + * - Types that are solely used as array/map members do not require a mojom + * definition if one exists in libcamera + * - Nested types (e.g. 
FrameBuffer::Plane) cannot be defined in mojom + - If used in mojom, the nested type shall be defined in a C++ header + and a (de)serializer shall be provided + - Nested types can only be used as array/map members + - When using the type, the C++ namespace separator :: is replaced with a + dot + - For example, to use the FrameBuffer::Plane type in mojom: + - Provide a definition of the FrameBuffer::Plane type in a C++ header + - Include the header in ipa_interface.h + - Provide a (de)serializer implementation in ipa_data_serializer.cpp + - In mojom, reference the type as FrameBuffer.Plane and only as map/array + member + - [skipHeader] and [skipSerdes] only work here in core.mojom + - If a field in a struct has a SharedFD, but is not explicitly + defined so in mojom, then the field must be marked with the [hasFd] + attribute + * + * \todo Generate documentation from Doxygen comments in .mojom files + * \todo Figure out how to keep the skipHeader structs in sync with their + * C++ definitions, and the skipSerdes structs in sync with their + * (de)serializers + */ +[skipSerdes, skipHeader] struct ControlInfoMap {}; +[skipSerdes, skipHeader] struct ControlList {}; +[skipSerdes, skipHeader] struct SharedFD {}; + +[skipHeader] struct Point { + int32 x; + int32 y; +}; + +[skipHeader] struct Size { + uint32 width; + uint32 height; +}; + +[skipHeader] struct SizeRange { + Size min; + Size max; + uint32 hStep; + uint32 vStep; +}; + +[skipHeader] struct Rectangle { + int32 x; + int32 y; + uint32 width; + uint32 height; +}; + +/** + * \struct IPACameraSensorInfo + * \brief Report the image sensor characteristics + * + * The structure reports image sensor characteristics used by IPA modules to + * tune their algorithms based on the image sensor model currently in use and + * its configuration. + * + * The reported information describes the sensor's intrinsic characteristics, + * such as its pixel array size and the sensor model name, as well as + * information relative to the currently configured mode, such as the produced + * image size and the bit depth of the requested image format. + * + * Instances of this structure are meant to be assembled by the CameraSensor + * class by inspecting the sensor static properties as well as information + * relative to the current configuration. + */ + +/** + * \var IPACameraSensorInfo::model + * \brief The image sensor model name + * + * The sensor model name is a free-form string that uniquely identifies the + * sensor model. + */ + +/** + * \var IPACameraSensorInfo::bitsPerPixel + * \brief The number of bits per pixel of the image format produced by the + * image sensor + */ + +/** + * \var IPACameraSensorInfo::cfaPattern + * \brief The arrangement of colour filters on the image sensor + * + * This takes a value defined by properties::draft::ColorFilterArrangementEnum. + * For non-Bayer colour sensors, the cfaPattern will be set to + * properties::draft::ColorFilterArrangementEnum::RGB. + * + * \todo Make this variable optional once mojom supports it, instead of using + * RGB for sensors that don't have a CFA. + */ + +/** + * \var IPACameraSensorInfo::activeAreaSize + * \brief The size of the pixel array active area of the sensor + */ + +/** + * \var IPACameraSensorInfo::analogCrop + * \brief The portion of the pixel array active area which is read out and + * processed + * + * The analog crop rectangle top-left corner is defined as the displacement + * from the top-left corner of the pixel array active area. 
The rectangle + horizontal and vertical sizes define the portion of the pixel array which + is read out and provided to the sensor's internal processing pipeline, before + any pixel sub-sampling method, such as pixel binning, skipping or averaging, + takes place. + */ + +/** + * \var IPACameraSensorInfo::outputSize + * \brief The size of the images produced by the camera sensor + * + * The output image size defines the horizontal and vertical sizes of the images + * produced by the image sensor. The output image size is defined as the end + * result of the sensor's internal image processing pipeline stages, applied on + * the pixel array portion defined by the analog crop rectangle. Each image + * processing stage that performs pixel sub-sampling, such as pixel binning or + * skipping, or applies additional digital scaling, contributes to the + * definition of the output image size. + */ + +/** + * \var IPACameraSensorInfo::pixelRate + * \brief The number of pixels produced in a second + * + * To obtain the read-out time in seconds of a full line: + * + * \verbatim + lineDuration(s) = lineLength(pixels) / pixelRate(pixels per second) + \endverbatim + */ + +/** + * \var IPACameraSensorInfo::minLineLength + * \brief The minimum line length in pixels + * + * The minimum allowable line length in pixel clock periods, including blanking. + */ + +/** + * \var IPACameraSensorInfo::maxLineLength + * \brief The maximum line length in pixels + * + * The maximum allowable line length in pixel clock periods, including blanking. + */ + +/** + * \var IPACameraSensorInfo::minFrameLength + * \brief The minimum allowable frame length in units of lines + * + * The sensor frame length comprises active output lines and blanking lines + * in a frame. The minimum frame length value dictates the minimum allowable + * frame duration of the sensor mode. + * + * To obtain the minimum frame duration: + * + * \verbatim + frameDuration(s) = minFrameLength(lines) * minLineLength(pixels) / pixelRate(pixels per second) + \endverbatim + */ + +/** + * \var IPACameraSensorInfo::maxFrameLength + * \brief The maximum allowable frame length in units of lines + * + * The sensor frame length comprises active output lines and blanking lines + * in a frame. The maximum frame length value dictates the maximum allowable + * frame duration of the sensor mode. + * + * To obtain the maximum frame duration: + * + * \verbatim + frameDuration(s) = maxFrameLength(lines) * maxLineLength(pixels) / pixelRate(pixels per second) + \endverbatim + */ +struct IPACameraSensorInfo { + string model; + + uint32 bitsPerPixel; + uint32 cfaPattern; + + Size activeAreaSize; + Rectangle analogCrop; + Size outputSize; + + uint64 pixelRate; + + uint32 minLineLength; + uint32 maxLineLength; + + uint32 minFrameLength; + uint32 maxFrameLength; +}; + +/** + * \struct IPABuffer + * \brief Buffer information for the IPA interface + * + * The IPABuffer structure associates buffer memory with a unique ID. It is + * used to map buffers to the IPA with IPAInterface::mapBuffers(), after which + * buffers will be identified by their ID in the IPA interface. + */ + +/** + * \var IPABuffer::id + * \brief The buffer unique ID + * + * Buffers mapped to the IPA are identified by numerical unique IDs. 
The IDs + * are chosen by the pipeline handler to fulfil the following constraints: + * + * - IDs shall be positive integers different than zero + * - IDs shall be unique among all mapped buffers + * + * When buffers are unmapped with IPAInterface::unmapBuffers() their IDs are + * freed and may be reused for new buffer mappings. + */ + +/** + * \var IPABuffer::planes + * \brief The buffer planes description + * + * Stores the dmabuf handle and length for each plane of the buffer. + */ +struct IPABuffer { + uint32 id; + [hasFd] array planes; +}; + +/** + * \struct IPASettings + * \brief IPA interface initialization settings + * + * The IPASettings structure stores data passed to the IPAInterface::init() + * function. The data contains settings that don't depend on a particular camera + * or pipeline configuration and are valid for the whole life time of the IPA + * interface. + */ + +/** + * \var IPASettings::configurationFile + * \brief The name of the IPA configuration file + * + * This field may be an empty string if the IPA doesn't require a configuration + * file. + */ + +/** + * \var IPASettings::sensorModel + * \brief The sensor model name + * + * Provides the sensor model name to the IPA. + */ +struct IPASettings { + string configurationFile; + string sensorModel; +}; + +/** + * \struct IPAStream + * \brief Stream configuration for the IPA interface + * + * The IPAStream structure stores stream configuration parameters needed by the + * IPAInterface::configure() function. It mirrors the StreamConfiguration class + * that is not suitable for this purpose due to not being serializable. + */ + +/** + * \var IPAStream::pixelFormat + * \brief The stream pixel format + */ + +/** + * \var IPAStream::size + * \brief The stream size in pixels + */ +struct IPAStream { + uint32 pixelFormat; + Size size; +}; diff --git a/spider-cam/libcamera/include/libcamera/ipa/ipa_controls.h b/spider-cam/libcamera/include/libcamera/ipa/ipa_controls.h new file mode 100644 index 0000000..5fd1339 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/ipa/ipa_controls.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * IPA Control handling + */ + +#pragma once + +#include + +#ifdef __cplusplus +namespace libcamera { + +extern "C" { +#endif + +#define IPA_CONTROLS_FORMAT_VERSION 1 + +enum ipa_controls_id_map_type { + IPA_CONTROL_ID_MAP_CONTROLS, + IPA_CONTROL_ID_MAP_PROPERTIES, + IPA_CONTROL_ID_MAP_V4L2, +}; + +struct ipa_controls_header { + uint32_t version; + uint32_t handle; + uint32_t entries; + uint32_t size; + uint32_t data_offset; + enum ipa_controls_id_map_type id_map_type; + uint32_t reserved[2]; +}; + +struct ipa_control_value_entry { + uint32_t id; + uint8_t type; + uint8_t is_array; + uint16_t count; + uint32_t offset; + uint32_t padding[1]; +}; + +struct ipa_control_info_entry { + uint32_t id; + uint32_t type; + uint32_t offset; + uint32_t padding[1]; +}; + +#ifdef __cplusplus +} /* namespace libcamera */ + +} +#endif diff --git a/spider-cam/libcamera/include/libcamera/ipa/ipa_interface.h b/spider-cam/libcamera/include/libcamera/ipa/ipa_interface.h new file mode 100644 index 0000000..53cf537 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/ipa/ipa_interface.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. 
+ * + * Image Processing Algorithm interface + */ + +#pragma once + +#include +#include + +#include +#include + +#include +#include + +#include +#include +#include + +namespace libcamera { + +/* + * Structs and enums that are defined in core.mojom that have the skipHeader + * tag must be #included here. + */ + +class IPAInterface +{ +public: + virtual ~IPAInterface() = default; +}; + +extern "C" { +libcamera::IPAInterface *ipaCreate(); +} + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/ipa/ipa_module_info.h b/spider-cam/libcamera/include/libcamera/ipa/ipa_module_info.h new file mode 100644 index 0000000..3507a6d --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/ipa/ipa_module_info.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * Image Processing Algorithm module information + */ + +#pragma once + +#include + +#define IPA_MODULE_API_VERSION 1 + +namespace libcamera { + +struct IPAModuleInfo { + int moduleAPIVersion; + uint32_t pipelineVersion; + char pipelineName[256]; + char name[256]; +} __attribute__((packed)); + +extern "C" { +extern const struct IPAModuleInfo ipaModuleInfo; +} + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/ipa/ipu3.mojom b/spider-cam/libcamera/include/libcamera/ipa/ipu3.mojom new file mode 100644 index 0000000..d1b1c6b --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/ipa/ipu3.mojom @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ + +/* + * \todo Document the interface and remove the related EXCLUDE_PATTERNS entry. + */ + +module ipa.ipu3; + +import "include/libcamera/ipa/core.mojom"; + +struct IPAConfigInfo { + libcamera.IPACameraSensorInfo sensorInfo; + libcamera.ControlInfoMap sensorControls; + libcamera.ControlInfoMap lensControls; + libcamera.Size bdsOutputSize; + libcamera.Size iif; +}; + +interface IPAIPU3Interface { + init(libcamera.IPASettings settings, + libcamera.IPACameraSensorInfo sensorInfo, + libcamera.ControlInfoMap sensorControls) + => (int32 ret, libcamera.ControlInfoMap ipaControls); + start() => (int32 ret); + stop(); + + configure(IPAConfigInfo configInfo) + => (int32 ret, libcamera.ControlInfoMap ipaControls); + + mapBuffers(array buffers); + unmapBuffers(array ids); + + [async] queueRequest(uint32 frame, libcamera.ControlList controls); + [async] fillParamsBuffer(uint32 frame, uint32 bufferId); + [async] processStatsBuffer(uint32 frame, int64 frameTimestamp, + uint32 bufferId, libcamera.ControlList sensorControls); +}; + +interface IPAIPU3EventInterface { + setSensorControls(uint32 frame, libcamera.ControlList sensorControls, + libcamera.ControlList lensControls); + paramsBufferReady(uint32 frame); + metadataReady(uint32 frame, libcamera.ControlList metadata); +}; diff --git a/spider-cam/libcamera/include/libcamera/ipa/meson.build b/spider-cam/libcamera/include/libcamera/ipa/meson.build new file mode 100644 index 0000000..3352d08 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/ipa/meson.build @@ -0,0 +1,167 @@ +# SPDX-License-Identifier: CC0-1.0 + +libcamera_ipa_include_dir = libcamera_include_dir / 'ipa' + +libcamera_ipa_headers = files([ + 'ipa_controls.h', + 'ipa_interface.h', + 'ipa_module_info.h', +]) + +install_headers(libcamera_ipa_headers, + subdir : libcamera_ipa_include_dir) + +libcamera_generated_ipa_headers = [] + +ipa_headers_install_dir = get_option('includedir') / libcamera_ipa_include_dir + +# +# Prepare IPA/IPC generation components +# + 
+core_mojom_file = 'core.mojom' +ipa_mojom_core = custom_target(core_mojom_file.split('.')[0] + '_mojom_module', + input : core_mojom_file, + output : core_mojom_file + '-module', + command : [ + mojom_parser, + '--output-root', meson.project_build_root(), + '--input-root', meson.project_source_root(), + '--mojoms', '@INPUT@' + ]) + +# core_ipa_interface.h +libcamera_generated_ipa_headers += custom_target('core_ipa_interface_h', + input : ipa_mojom_core, + output : 'core_ipa_interface.h', + depends : mojom_templates, + install : true, + install_dir : ipa_headers_install_dir, + command : [ + mojom_generator, 'generate', + '-g', 'libcamera', + '--bytecode_path', mojom_templates_dir, + '--libcamera_generate_core_header', + '--libcamera_output_path=@OUTPUT@', + './' +'@INPUT@' + ]) + +# core_ipa_serializer.h +libcamera_generated_ipa_headers += custom_target('core_ipa_serializer_h', + input : ipa_mojom_core, + output : 'core_ipa_serializer.h', + depends : mojom_templates, + command : [ + mojom_generator, 'generate', + '-g', 'libcamera', + '--bytecode_path', mojom_templates_dir, + '--libcamera_generate_core_serializer', + '--libcamera_output_path=@OUTPUT@', + './' +'@INPUT@' + ]) + +# Mapping from pipeline handler name to mojom file +pipeline_ipa_mojom_mapping = { + 'ipu3': 'ipu3.mojom', + 'rkisp1': 'rkisp1.mojom', + 'rpi/vc4': 'raspberrypi.mojom', + 'simple': 'soft.mojom', + 'vimc': 'vimc.mojom', +} + +# +# Generate headers from templates. +# + +# TODO Define per-pipeline ControlInfoMap with yaml? + +ipa_mojoms = [] +mojoms_built = [] +foreach pipeline, file : pipeline_ipa_mojom_mapping + name = file.split('.')[0] + + # Avoid building duplicate mojom interfaces with the same interface file + if name in mojoms_built + continue + endif + + if pipeline not in pipelines + continue + endif + + mojoms_built += name + + # {interface}.mojom-module + mojom = custom_target(name + '_mojom_module', + input : file, + output : file + '-module', + depends : ipa_mojom_core, + command : [ + mojom_parser, + '--output-root', meson.project_build_root(), + '--input-root', meson.project_source_root(), + '--mojoms', '@INPUT@' + ]) + + # {interface}_ipa_interface.h + header = custom_target(name + '_ipa_interface_h', + input : mojom, + output : name + '_ipa_interface.h', + depends : mojom_templates, + install : true, + install_dir : ipa_headers_install_dir, + command : [ + mojom_generator, 'generate', + '-g', 'libcamera', + '--bytecode_path', mojom_templates_dir, + '--libcamera_generate_header', + '--libcamera_output_path=@OUTPUT@', + './' +'@INPUT@' + ]) + + # {interface}_ipa_serializer.h + serializer = custom_target(name + '_ipa_serializer_h', + input : mojom, + output : name + '_ipa_serializer.h', + depends : mojom_templates, + command : [ + mojom_generator, 'generate', + '-g', 'libcamera', + '--bytecode_path', mojom_templates_dir, + '--libcamera_generate_serializer', + '--libcamera_output_path=@OUTPUT@', + './' +'@INPUT@' + ]) + + # {interface}_ipa_proxy.h + proxy_header = custom_target(name + '_proxy_h', + input : mojom, + output : name + '_ipa_proxy.h', + depends : mojom_templates, + command : [ + mojom_generator, 'generate', + '-g', 'libcamera', + '--bytecode_path', mojom_templates_dir, + '--libcamera_generate_proxy_h', + '--libcamera_output_path=@OUTPUT@', + './' +'@INPUT@' + ]) + + ipa_mojoms += { + 'name': name, + 'mojom': mojom, + } + + libcamera_generated_ipa_headers += [header, serializer, proxy_header] +endforeach + +ipa_mojom_files = [] +foreach pipeline, file : pipeline_ipa_mojom_mapping + if file not 
in ipa_mojom_files + ipa_mojom_files += file + endif +endforeach +ipa_mojom_files = files(ipa_mojom_files) + +# Pass this to the documentation generator in src/libcamera/ipa +ipa_mojom_files += files(['core.mojom']) diff --git a/spider-cam/libcamera/include/libcamera/ipa/raspberrypi.mojom b/spider-cam/libcamera/include/libcamera/ipa/raspberrypi.mojom new file mode 100644 index 0000000..5986c43 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/ipa/raspberrypi.mojom @@ -0,0 +1,301 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ + +/* + * \todo Document the interface and remove the related EXCLUDE_PATTERNS entry. + */ + +module ipa.RPi; + +import "include/libcamera/ipa/core.mojom"; + +/* Size of the LS grid allocation on VC4. */ +const uint32 MaxLsGridSize = 0x8000; + +struct SensorConfig { + uint32 gainDelay; + uint32 exposureDelay; + uint32 vblankDelay; + uint32 hblankDelay; + uint32 sensorMetadata; +}; + +struct InitParams { + bool lensPresent; + libcamera.IPACameraSensorInfo sensorInfo; + /* PISP specific */ + libcamera.SharedFD fe; + libcamera.SharedFD be; +}; + +struct InitResult { + SensorConfig sensorConfig; + libcamera.ControlInfoMap controlInfo; +}; + +struct BufferIds { + uint32 bayer; + uint32 embedded; + uint32 stats; +}; + +struct ConfigParams { + uint32 transform; + libcamera.ControlInfoMap sensorControls; + libcamera.ControlInfoMap ispControls; + libcamera.ControlInfoMap lensControls; + /* VC4 specific */ + libcamera.SharedFD lsTableHandle; +}; + +struct ConfigResult { + float modeSensitivity; + libcamera.ControlInfoMap controlInfo; + libcamera.ControlList sensorControls; + libcamera.ControlList lensControls; +}; + +struct StartResult { + libcamera.ControlList controls; + int32 dropFrameCount; +}; + +struct PrepareParams { + BufferIds buffers; + libcamera.ControlList sensorControls; + libcamera.ControlList requestControls; + uint32 ipaContext; + uint32 delayContext; +}; + +struct ProcessParams { + BufferIds buffers; + uint32 ipaContext; +}; + +interface IPARPiInterface { + /** + * \fn init() + * \brief Initialise the IPA + * \param[in] settings Camera sensor information and configuration file + * \param[in] params Platform specific initialisation parameters + * \param[out] ret 0 on success or a negative error code otherwise + * \param[out] result Static sensor configuration and controls available + * + * This function initialises the IPA for a particular sensor from the + * pipeline handler. + * + * The \a settings conveys information about the camera sensor and + * configuration file requested by the pipeline handler. + * + * The \a result parameter returns the sensor delay for the given camera + * as well as a ControlInfoMap of available controls that can be handled + * by the IPA. + */ + init(libcamera.IPASettings settings, InitParams params) + => (int32 ret, InitResult result); + + /** + * \fn start() + * \brief Start the IPA + * \param[in] controls List of controls to handle + * \param[out] result Controls to apply and number of dropped frames + * + * This function sets the IPA to a started state. + * + * The \a controls provide a list of controls to handle immediately. The + * actual controls to apply on the sensor and ISP in the pipeline + * handler are returned in \a result. + * + * The number of convergence frames to be dropped is also returned in + * \a result. + */ + start(libcamera.ControlList controls) => (StartResult result); + + /** + * \fn stop() + * \brief Stop the IPA + * + * This function sets the IPA to a stopped state. + */ + stop();
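	/*
	 * Illustrative call flow (not part of the upstream interface
	 * definition): a pipeline handler is expected to drive these
	 * functions in a fixed lifecycle, roughly:
	 *
	 *   init()           once, when the IPA module is loaded
	 *   configure()      whenever the camera configuration changes
	 *   start()          on entering the streaming state
	 *     prepareIsp()   per frame, before the ISP runs      [async]
	 *     processStats() per frame, on ISP statistics output [async]
	 *   stop()           on leaving the streaming state
	 */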
 + + /** + * \fn configure() + * \brief Configure the IPA + * \param[in] sensorInfo Sensor mode configuration + * \param[in] params Platform configuration parameters + * \param[out] ret 0 on success or a negative error code otherwise + * \param[out] result Results of the configuration operation + * + * This function configures the IPA for a particular camera + * configuration. + * + * The \a params parameter provides a list of available controls for the + * ISP, sensor and lens devices, and the user requested transform + * operation. It can also provide platform specific configuration + * parameters, e.g. the lens shading table memory handle for VC4. + * + * The \a result parameter returns the available controls for the given + * camera mode, a list of controls to apply to the sensor device, and + * the requested mode's sensitivity characteristics. + */ + configure(libcamera.IPACameraSensorInfo sensorInfo, ConfigParams params) + => (int32 ret, ConfigResult result); + + /** + * \fn mapBuffers() + * \brief Map buffers shared between the pipeline handler and the IPA + * \param[in] buffers List of buffers to map + * + * This function informs the IPA module of memory buffers set up by the + * pipeline handler that the IPA needs to access. It provides dmabuf + * file handles for each buffer, and associates the buffers with unique + * numerical IDs. + * + * IPAs shall map the dmabuf file handles to their address space and + * keep a cache of the mappings, indexed by the buffer numerical IDs. + * The IDs are used in all other IPA interface functions to refer to + * buffers, including the unmapBuffers() function. + * + * All buffers that the pipeline handler wishes to share with an IPA + * shall be mapped with this function. Buffers may be mapped all at once + * with a single call, or mapped and unmapped dynamically at runtime, + * depending on the IPA protocol. Regardless of the protocol, all + * buffers mapped at a given time shall have unique numerical IDs. + * + * The numerical IDs have no meaning defined by the IPA interface, and + * should be treated as opaque handles by IPAs, with the only exception + * that ID zero is invalid. + * + * \sa unmapBuffers() + */ + mapBuffers(array<libcamera.IPABuffer> buffers); + + /** + * \fn unmapBuffers() + * \brief Unmap buffers shared by the pipeline to the IPA + * \param[in] ids List of buffer IDs to unmap + * + * This function removes mappings set up with mapBuffers(). Numerical + * IDs of unmapped buffers may be reused when mapping new buffers. + * + * \sa mapBuffers() + */ + unmapBuffers(array<uint32> ids);
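	/*
	 * A minimal sketch of the IPA-side buffer cache that the mapBuffers()
	 * / unmapBuffers() contract implies. MappedFrameBuffer is libcamera's
	 * internal dmabuf-mapping helper (mapped_framebuffer.h); the IPARPi
	 * class name and the buffers_ member are hypothetical:
	 *
	 *   std::map<unsigned int, MappedFrameBuffer> buffers_;
	 *
	 *   void IPARPi::mapBuffers(const std::vector<IPABuffer> &buffers)
	 *   {
	 *           for (const IPABuffer &buffer : buffers) {
	 *                   // Wrap the dmabuf planes and mmap them, keyed by ID.
	 *                   const FrameBuffer fb(buffer.planes);
	 *                   buffers_.emplace(buffer.id,
	 *                                    MappedFrameBuffer(&fb, MappedFrameBuffer::MapFlag::ReadWrite));
	 *           }
	 *   }
	 *
	 *   void IPARPi::unmapBuffers(const std::vector<unsigned int> &ids)
	 *   {
	 *           for (unsigned int id : ids)
	 *                   buffers_.erase(id);
	 *   }
	 */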
 + + /** + * \fn prepareIsp() + * \brief Prepare the ISP configuration for a frame + * \param[in] params Parameter set for the frame to process + * + * This function calls into all the algorithms in preparation for the + * frame to be processed by the ISP. + * + * The \a params parameter lists the buffer IDs for the Bayer and + * embedded data buffers, a ControlList of sensor frame params, and + * a ControlList of request controls for the current frame. + * + * Additionally, \a params also contains the IPA context (ipaContext) to + * use as an index location to store control algorithm results, and a + * historical IPA context (delayContext) that was active when the sensor + * settings were requested by the IPA. + */ + [async] prepareIsp(PrepareParams params); + + /** + * \fn processStats() + * \brief Process the statistics provided by the ISP + * \param[in] params Parameter set for the statistics to process + * + * This function calls into all the algorithms to provide the statistics + * generated by the ISP for the processed frame. + * + * The \a params parameter lists the buffer ID for the statistics buffer + * and an IPA context (ipaContext) to use as an index location to store + * algorithm results. + */ + [async] processStats(ProcessParams params); +}; + +interface IPARPiEventInterface { + /** + * \fn prepareIspComplete() + * \brief Signal completion of \a prepareIsp + * \param[in] buffers Bayer and embedded buffers actioned. + * \param[in] stitchSwapBuffers Whether the stitch block buffers need to be swapped. + * + * This asynchronous event is signalled to the pipeline handler once + * the \a prepareIsp signal has completed, and the ISP is ready to start + * processing the frame. The embedded data buffer may be recycled after + * this event. + */ + prepareIspComplete(BufferIds buffers, bool stitchSwapBuffers); + + /** + * \fn processStatsComplete() + * \brief Signal completion of \a processStats + * \param[in] buffers Statistics buffers actioned. + * + * This asynchronous event is signalled to the pipeline handler once + * the \a processStats signal has completed. The statistics buffer may + * be recycled after this event. + */ + processStatsComplete(BufferIds buffers); + + /** + * \fn metadataReady() + * \brief Signal request metadata is to be merged + * \param[in] metadata Control list of metadata to be merged + * + * This asynchronous event is signalled to the pipeline handler once + * all the frame metadata has been gathered. The pipeline handler will + * copy or merge this metadata into the \a Request returned back to the + * application. + */ + metadataReady(libcamera.ControlList metadata); + + /** + * \fn setIspControls() + * \brief Signal ISP controls to be applied. + * \param[in] controls List of controls to be applied. + * + * This asynchronous event is signalled to the pipeline handler during + * the \a prepareIsp signal after all algorithms have been run and the + * IPA requires ISP controls to be applied for the frame. + */ + setIspControls(libcamera.ControlList controls); + + /** + * \fn setDelayedControls() + * \brief Signal sensor controls to be applied. + * \param[in] controls List of controls to be applied. + * \param[in] delayContext IPA context index used for this request + * + * This asynchronous event is signalled to the pipeline handler when + * the IPA requires sensor specific controls (e.g. shutter speed, gain, + * blanking) to be applied. + */ + setDelayedControls(libcamera.ControlList controls, uint32 delayContext); + + /** + * \fn setLensControls() + * \brief Signal lens controls to be applied. + * \param[in] controls List of controls to be applied. + * + * This asynchronous event is signalled to the pipeline handler when + * the IPA requires a lens movement control to be applied. + */ + setLensControls(libcamera.ControlList controls); + + /** + * \fn setCameraTimeout() + * \brief Request a watchdog timeout value to use + * \param[in] maxFrameLengthMs Timeout value in ms + * + * This asynchronous event is used by the IPA to inform the pipeline + * handler of an acceptable watchdog timer value to use for the sensor + * stream. This value is based on the history of frame lengths requested + * by the IPA. 
+ */ + setCameraTimeout(uint32 maxFrameLengthMs); +}; diff --git a/spider-cam/libcamera/include/libcamera/ipa/rkisp1.mojom b/spider-cam/libcamera/include/libcamera/ipa/rkisp1.mojom new file mode 100644 index 0000000..1009e97 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/ipa/rkisp1.mojom @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ + +/* + * \todo Document the interface and remove the related EXCLUDE_PATTERNS entry. + */ + +module ipa.rkisp1; + +import "include/libcamera/ipa/core.mojom"; + +struct IPAConfigInfo { + libcamera.IPACameraSensorInfo sensorInfo; + libcamera.ControlInfoMap sensorControls; +}; + +interface IPARkISP1Interface { + init(libcamera.IPASettings settings, + uint32 hwRevision, + libcamera.IPACameraSensorInfo sensorInfo, + libcamera.ControlInfoMap sensorControls) + => (int32 ret, libcamera.ControlInfoMap ipaControls); + start() => (int32 ret); + stop(); + + configure(IPAConfigInfo configInfo, + map streamConfig) + => (int32 ret, libcamera.ControlInfoMap ipaControls); + + mapBuffers(array buffers); + unmapBuffers(array ids); + + [async] queueRequest(uint32 frame, libcamera.ControlList reqControls); + [async] fillParamsBuffer(uint32 frame, uint32 bufferId); + [async] processStatsBuffer(uint32 frame, uint32 bufferId, + libcamera.ControlList sensorControls); +}; + +interface IPARkISP1EventInterface { + paramsBufferReady(uint32 frame); + setSensorControls(uint32 frame, libcamera.ControlList sensorControls); + metadataReady(uint32 frame, libcamera.ControlList metadata); +}; diff --git a/spider-cam/libcamera/include/libcamera/ipa/soft.mojom b/spider-cam/libcamera/include/libcamera/ipa/soft.mojom new file mode 100644 index 0000000..3aa2066 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/ipa/soft.mojom @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ + +/* + * \todo Document the interface and remove the related EXCLUDE_PATTERNS entry. + */ + +module ipa.soft; + +import "include/libcamera/ipa/core.mojom"; + +interface IPASoftInterface { + init(libcamera.IPASettings settings, + libcamera.SharedFD fdStats, + libcamera.SharedFD fdParams, + libcamera.ControlInfoMap sensorCtrlInfoMap) + => (int32 ret); + start() => (int32 ret); + stop(); + configure(libcamera.ControlInfoMap sensorCtrlInfoMap) + => (int32 ret); + + [async] processStats(libcamera.ControlList sensorControls); +}; + +interface IPASoftEventInterface { + setSensorControls(libcamera.ControlList sensorControls); + setIspParams(); +}; diff --git a/spider-cam/libcamera/include/libcamera/ipa/vimc.mojom b/spider-cam/libcamera/include/libcamera/ipa/vimc.mojom new file mode 100644 index 0000000..dd991f7 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/ipa/vimc.mojom @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ + +/* + * \todo Document the interface and remove the related EXCLUDE_PATTERNS entry. 
+ */ + +module ipa.vimc; + +import "include/libcamera/ipa/core.mojom"; + +const string VimcIPAFIFOPath = "/tmp/libcamera_ipa_vimc_fifo"; + +enum IPAOperationCode { + IPAOperationNone, + IPAOperationInit, + IPAOperationStart, + IPAOperationStop, +}; + +[scopedEnum] enum TestFlag { + Flag1 = 0x1, + Flag2 = 0x2, + Flag3 = 0x4, + Flag4 = 0x8, +}; + +interface IPAVimcInterface { + init(libcamera.IPASettings settings, + IPAOperationCode code, + [flags] TestFlag inFlags) + => (int32 ret, [flags] TestFlag outFlags); + + configure(libcamera.IPACameraSensorInfo sensorInfo, + map<uint32, libcamera.IPAStream> streamConfig, + map<uint32, libcamera.ControlInfoMap> entityControls) => (int32 ret); + + start() => (int32 ret); + stop(); + + mapBuffers(array<libcamera.IPABuffer> buffers); + unmapBuffers(array<uint32> ids); + + [async] queueRequest(uint32 frame, libcamera.ControlList controls); + /* + * The vimc driver doesn't use parameter buffers. To maximize coverage + * of unit tests that rely on the VIMC pipeline handler, we still define + * interface functions that mimic how other pipeline handlers typically + * handle parameters at runtime. + */ + [async] fillParamsBuffer(uint32 frame, uint32 bufferId); +}; + +interface IPAVimcEventInterface { + paramsBufferReady(uint32 bufferId, [flags] TestFlag flags); +}; diff --git a/spider-cam/libcamera/include/libcamera/logging.h b/spider-cam/libcamera/include/libcamera/logging.h new file mode 100644 index 0000000..e334d87 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/logging.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * Logging infrastructure + */ + +#pragma once + +namespace libcamera { + +enum LoggingTarget { + LoggingTargetNone, + LoggingTargetSyslog, + LoggingTargetFile, + LoggingTargetStream, +}; + +int logSetFile(const char *path, bool color = false); +int logSetStream(std::ostream *stream, bool color = false); +int logSetTarget(LoggingTarget target); +void logSetLevel(const char *category, const char *level); + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/meson.build b/spider-cam/libcamera/include/libcamera/meson.build new file mode 100644 index 0000000..84c6c4c --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/meson.build @@ -0,0 +1,126 @@ +# SPDX-License-Identifier: CC0-1.0 + +libcamera_include_dir = 'libcamera' / 'libcamera' + +libcamera_public_headers = files([ + 'camera.h', + 'camera_manager.h', + 'color_space.h', + 'controls.h', + 'fence.h', + 'framebuffer.h', + 'framebuffer_allocator.h', + 'geometry.h', + 'logging.h', + 'orientation.h', + 'pixel_format.h', + 'request.h', + 'stream.h', + 'transform.h', +]) + +subdir('base') +subdir('internal') +subdir('ipa') + +install_headers(libcamera_public_headers, + subdir : libcamera_include_dir) + +# +# Generate headers from templates. 
+# + +libcamera_headers_install_dir = get_option('includedir') / libcamera_include_dir + +controls_map = { + 'controls': { + 'draft': 'control_ids_draft.yaml', + 'core': 'control_ids_core.yaml', + 'rpi/vc4': 'control_ids_rpi.yaml', + }, + + 'properties': { + 'draft': 'property_ids_draft.yaml', + 'core': 'property_ids_core.yaml', + } +} + +control_headers = [] +controls_files = [] +properties_files = [] + +foreach mode, entry : controls_map + files_list = [] + input_files = [] + foreach vendor, header : entry + if vendor != 'core' and vendor != 'draft' + if vendor not in pipelines + continue + endif + endif + + if header in files_list + continue + endif + + files_list += header + input_files += files('../../src/libcamera/' + header) + endforeach + + outfile = '' + if mode == 'controls' + outfile = 'control_ids.h' + controls_files += files_list + else + outfile = 'property_ids.h' + properties_files += files_list + endif + + template_file = files(outfile + '.in') + ranges_file = files('../../src/libcamera/control_ranges.yaml') + control_headers += custom_target(header + '_h', + input : input_files, + output : outfile, + command : [gen_controls, '-o', '@OUTPUT@', + '--mode', mode, '-t', template_file, + '-r', ranges_file, '@INPUT@'], + install : true, + install_dir : libcamera_headers_install_dir) +endforeach + +libcamera_public_headers += control_headers + +# formats.h +formats_h = custom_target('formats_h', + input : files( + '../../src/libcamera/formats.yaml', + 'formats.h.in', + '../linux/drm_fourcc.h' + ), + output : 'formats.h', + command : [gen_formats, '-o', '@OUTPUT@', '@INPUT@'], + install : true, + install_dir : libcamera_headers_install_dir) +libcamera_public_headers += formats_h + +# libcamera.h +libcamera_h = custom_target('gen-header', + input : 'meson.build', + output : 'libcamera.h', + command : [gen_header, meson.current_source_dir(), '@OUTPUT@'], + install : true, + install_dir : libcamera_headers_install_dir) + +libcamera_public_headers += libcamera_h + +# version.h +version = libcamera_version.split('.') +libcamera_version_config = configuration_data() +libcamera_version_config.set('LIBCAMERA_VERSION_MAJOR', version[0]) +libcamera_version_config.set('LIBCAMERA_VERSION_MINOR', version[1]) +libcamera_version_config.set('LIBCAMERA_VERSION_PATCH', version[2]) + +configure_file(input : 'version.h.in', + output : 'version.h', + configuration : libcamera_version_config, + install_dir : libcamera_headers_install_dir) diff --git a/spider-cam/libcamera/include/libcamera/orientation.h b/spider-cam/libcamera/include/libcamera/orientation.h new file mode 100644 index 0000000..a3b40e6 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/orientation.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2023, Ideas On Board Oy + * + * Image orientation + */ + +#pragma once + +#include + +namespace libcamera { + +enum class Orientation { + /* EXIF tag 274 starts from '1' */ + Rotate0 = 1, + Rotate0Mirror, + Rotate180, + Rotate180Mirror, + Rotate90Mirror, + Rotate270, + Rotate270Mirror, + Rotate90, +}; + +Orientation orientationFromRotation(int angle, bool *success = nullptr); + +std::ostream &operator<<(std::ostream &out, const Orientation &orientation); + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/pixel_format.h b/spider-cam/libcamera/include/libcamera/pixel_format.h new file mode 100644 index 0000000..ea60fe7 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/pixel_format.h @@ -0,0 +1,51 @@ +/* 
SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * libcamera Pixel Format + */ + +#pragma once + +#include +#include +#include +#include + +namespace libcamera { + +class PixelFormat +{ +public: + constexpr PixelFormat() + : fourcc_(0), modifier_(0) + { + } + + explicit constexpr PixelFormat(uint32_t fourcc, uint64_t modifier = 0) + : fourcc_(fourcc), modifier_(modifier) + { + } + + bool operator==(const PixelFormat &other) const; + bool operator!=(const PixelFormat &other) const { return !(*this == other); } + bool operator<(const PixelFormat &other) const; + + constexpr bool isValid() const { return fourcc_ != 0; } + + constexpr operator uint32_t() const { return fourcc_; } + constexpr uint32_t fourcc() const { return fourcc_; } + constexpr uint64_t modifier() const { return modifier_; } + + std::string toString() const; + + static PixelFormat fromString(const std::string &name); + +private: + uint32_t fourcc_; + uint64_t modifier_; +}; + +std::ostream &operator<<(std::ostream &out, const PixelFormat &f); + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/property_ids.h.in b/spider-cam/libcamera/include/libcamera/property_ids.h.in new file mode 100644 index 0000000..e6edabc --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/property_ids.h.in @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * Property ID list + * + * This file is auto-generated. Do not edit. + */ + +#pragma once + +#include +#include +#include + +#include + +namespace libcamera { + +namespace properties { + +enum { +${ids} +}; + +${controls} + +extern const ControlIdMap properties; + +${vendor_controls} + +} /* namespace properties */ + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/request.h b/spider-cam/libcamera/include/libcamera/request.h new file mode 100644 index 0000000..2c78d9b --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/request.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. 
+ * + * Capture request handling + */ + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +namespace libcamera { + +class Camera; +class CameraControlValidator; +class FrameBuffer; +class Stream; + +class Request : public Extensible +{ + LIBCAMERA_DECLARE_PRIVATE() + +public: + enum Status { + RequestPending, + RequestComplete, + RequestCancelled, + }; + + enum ReuseFlag { + Default = 0, + ReuseBuffers = (1 << 0), + }; + + using BufferMap = std::map; + + Request(Camera *camera, uint64_t cookie = 0); + ~Request(); + + void reuse(ReuseFlag flags = Default); + + ControlList &controls() { return *controls_; } + ControlList &metadata() { return *metadata_; } + const BufferMap &buffers() const { return bufferMap_; } + int addBuffer(const Stream *stream, FrameBuffer *buffer, + std::unique_ptr fence = nullptr); + FrameBuffer *findBuffer(const Stream *stream) const; + + uint32_t sequence() const; + uint64_t cookie() const { return cookie_; } + Status status() const { return status_; } + + bool hasPendingBuffers() const; + + std::string toString() const; + +private: + LIBCAMERA_DISABLE_COPY(Request) + + ControlList *controls_; + ControlList *metadata_; + BufferMap bufferMap_; + + const uint64_t cookie_; + Status status_; +}; + +std::ostream &operator<<(std::ostream &out, const Request &r); + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/stream.h b/spider-cam/libcamera/include/libcamera/stream.h new file mode 100644 index 0000000..d510238 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/stream.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * Video stream for a Camera + */ + +#pragma once + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace libcamera { + +class Camera; +class Stream; + +class StreamFormats +{ +public: + StreamFormats(); + StreamFormats(const std::map> &formats); + + std::vector pixelformats() const; + std::vector sizes(const PixelFormat &pixelformat) const; + + SizeRange range(const PixelFormat &pixelformat) const; + +private: + std::map> formats_; +}; + +struct StreamConfiguration { + StreamConfiguration(); + StreamConfiguration(const StreamFormats &formats); + + PixelFormat pixelFormat; + Size size; + unsigned int stride; + unsigned int frameSize; + + unsigned int bufferCount; + + std::optional colorSpace; + + Stream *stream() const { return stream_; } + void setStream(Stream *stream) { stream_ = stream; } + const StreamFormats &formats() const { return formats_; } + + std::string toString() const; + +private: + Stream *stream_; + StreamFormats formats_; +}; + +enum class StreamRole { + Raw, + StillCapture, + VideoRecording, + Viewfinder, +}; + +std::ostream &operator<<(std::ostream &out, StreamRole role); + +class Stream +{ +public: + Stream(); + + const StreamConfiguration &configuration() const { return configuration_; } + +protected: + friend class Camera; + + StreamConfiguration configuration_; +}; + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/transform.h b/spider-cam/libcamera/include/libcamera/transform.h new file mode 100644 index 0000000..a88f809 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/transform.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Raspberry Pi Ltd + * + * 2D plane transforms + */ + +#pragma once + +#include + +namespace 
libcamera { + +enum class Orientation; + +enum class Transform : int { + Identity = 0, + Rot0 = Identity, + HFlip = 1, + VFlip = 2, + HVFlip = HFlip | VFlip, + Rot180 = HVFlip, + Transpose = 4, + Rot270 = HFlip | Transpose, + Rot90 = VFlip | Transpose, + Rot180Transpose = HFlip | VFlip | Transpose +}; + +constexpr Transform operator&(Transform t0, Transform t1) +{ + return static_cast(static_cast(t0) & static_cast(t1)); +} + +constexpr Transform operator|(Transform t0, Transform t1) +{ + return static_cast(static_cast(t0) | static_cast(t1)); +} + +constexpr Transform operator^(Transform t0, Transform t1) +{ + return static_cast(static_cast(t0) ^ static_cast(t1)); +} + +constexpr Transform &operator&=(Transform &t0, Transform t1) +{ + return t0 = t0 & t1; +} + +constexpr Transform &operator|=(Transform &t0, Transform t1) +{ + return t0 = t0 | t1; +} + +constexpr Transform &operator^=(Transform &t0, Transform t1) +{ + return t0 = t0 ^ t1; +} + +Transform operator*(Transform t0, Transform t1); + +Transform operator-(Transform t); + +constexpr bool operator!(Transform t) +{ + return t == Transform::Identity; +} + +constexpr Transform operator~(Transform t) +{ + return static_cast(~static_cast(t) & 7); +} + +Transform transformFromRotation(int angle, bool *success = nullptr); + +Transform operator/(const Orientation &o1, const Orientation &o2); +Orientation operator*(const Orientation &o, const Transform &t); + +const char *transformToString(Transform t); + +} /* namespace libcamera */ diff --git a/spider-cam/libcamera/include/libcamera/version.h.in b/spider-cam/libcamera/include/libcamera/version.h.in new file mode 100644 index 0000000..50bf100 --- /dev/null +++ b/spider-cam/libcamera/include/libcamera/version.h.in @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * Library version information + * + * This file is auto-generated. Do not edit. + */ + +#pragma once + +#define LIBCAMERA_VERSION_MAJOR @LIBCAMERA_VERSION_MAJOR@ +#define LIBCAMERA_VERSION_MINOR @LIBCAMERA_VERSION_MINOR@ +#define LIBCAMERA_VERSION_PATCH @LIBCAMERA_VERSION_PATCH@ diff --git a/spider-cam/libcamera/include/linux/README b/spider-cam/libcamera/include/linux/README new file mode 100644 index 0000000..b779530 --- /dev/null +++ b/spider-cam/libcamera/include/linux/README @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: CC0-1.0 + +Files in this directory are imported from v6.10-rc1 of the Linux kernel. Do not +modify them manually. diff --git a/spider-cam/libcamera/include/linux/bcm2835-isp.h b/spider-cam/libcamera/include/linux/bcm2835-isp.h new file mode 100644 index 0000000..5f0f78e --- /dev/null +++ b/spider-cam/libcamera/include/linux/bcm2835-isp.h @@ -0,0 +1,346 @@ +/* SPDX-License-Identifier: ((GPL-2.0+ WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* + * bcm2835-isp.h + * + * BCM2835 ISP driver - user space header file. 
+ * + * Copyright © 2019-2020 Raspberry Pi Ltd + * + * Author: Naushir Patuck (naush@raspberrypi.com) + * + */ + +#ifndef __BCM2835_ISP_H_ +#define __BCM2835_ISP_H_ + +#include + +#define V4L2_CID_USER_BCM2835_ISP_CC_MATRIX \ + (V4L2_CID_USER_BCM2835_ISP_BASE + 0x0001) +#define V4L2_CID_USER_BCM2835_ISP_LENS_SHADING \ + (V4L2_CID_USER_BCM2835_ISP_BASE + 0x0002) +#define V4L2_CID_USER_BCM2835_ISP_BLACK_LEVEL \ + (V4L2_CID_USER_BCM2835_ISP_BASE + 0x0003) +#define V4L2_CID_USER_BCM2835_ISP_GEQ \ + (V4L2_CID_USER_BCM2835_ISP_BASE + 0x0004) +#define V4L2_CID_USER_BCM2835_ISP_GAMMA \ + (V4L2_CID_USER_BCM2835_ISP_BASE + 0x0005) +#define V4L2_CID_USER_BCM2835_ISP_DENOISE \ + (V4L2_CID_USER_BCM2835_ISP_BASE + 0x0006) +#define V4L2_CID_USER_BCM2835_ISP_SHARPEN \ + (V4L2_CID_USER_BCM2835_ISP_BASE + 0x0007) +#define V4L2_CID_USER_BCM2835_ISP_DPC \ + (V4L2_CID_USER_BCM2835_ISP_BASE + 0x0008) +#define V4L2_CID_USER_BCM2835_ISP_CDN \ + (V4L2_CID_USER_BCM2835_ISP_BASE + 0x0009) +/* + * All structs below are directly mapped onto the equivalent structs in + * drivers/staging/vc04_services/vchiq-mmal/mmal-parameters.h + * for convenience. + */ + +/** + * struct bcm2835_isp_rational - Rational value type. + * + * @num: Numerator. + * @den: Denominator. + */ +struct bcm2835_isp_rational { + __s32 num; + __u32 den; +}; + +/** + * struct bcm2835_isp_ccm - Colour correction matrix. + * + * @ccm: 3x3 correction matrix coefficients. + * @offsets: 1x3 correction offsets. + */ +struct bcm2835_isp_ccm { + struct bcm2835_isp_rational ccm[3][3]; + __s32 offsets[3]; +}; + +/** + * struct bcm2835_isp_custom_ccm - Custom CCM applied with the + * V4L2_CID_USER_BCM2835_ISP_CC_MATRIX ctrl. + * + * @enabled: Enable custom CCM. + * @ccm: Custom CCM coefficients and offsets. + */ +struct bcm2835_isp_custom_ccm { + __u32 enabled; + struct bcm2835_isp_ccm ccm; +}; + +/** + * enum bcm2835_isp_gain_format - format of the gains in the lens shading + * tables used with the + * V4L2_CID_USER_BCM2835_ISP_LENS_SHADING ctrl. + * + * @GAIN_FORMAT_U0P8_1: Gains are u0.8 format, starting at 1.0 + * @GAIN_FORMAT_U1P7_0: Gains are u1.7 format, starting at 0.0 + * @GAIN_FORMAT_U1P7_1: Gains are u1.7 format, starting at 1.0 + * @GAIN_FORMAT_U2P6_0: Gains are u2.6 format, starting at 0.0 + * @GAIN_FORMAT_U2P6_1: Gains are u2.6 format, starting at 1.0 + * @GAIN_FORMAT_U3P5_0: Gains are u3.5 format, starting at 0.0 + * @GAIN_FORMAT_U3P5_1: Gains are u3.5 format, starting at 1.0 + * @GAIN_FORMAT_U4P10: Gains are u4.10 format, starting at 0.0 + */ +enum bcm2835_isp_gain_format { + GAIN_FORMAT_U0P8_1 = 0, + GAIN_FORMAT_U1P7_0 = 1, + GAIN_FORMAT_U1P7_1 = 2, + GAIN_FORMAT_U2P6_0 = 3, + GAIN_FORMAT_U2P6_1 = 4, + GAIN_FORMAT_U3P5_0 = 5, + GAIN_FORMAT_U3P5_1 = 6, + GAIN_FORMAT_U4P10 = 7, +}; + +/** + * struct bcm2835_isp_lens_shading - Lens shading tables supplied with the + * V4L2_CID_USER_BCM2835_ISP_LENS_SHADING + * ctrl. + * + * @enabled: Enable lens shading. + * @grid_cell_size: Size of grid cells in samples (16, 32, 64, 128 or 256). + * @grid_width: Width of lens shading tables in grid cells. + * @grid_stride: Row to row distance (in grid cells) between grid cells + * in the same horizontal location. + * @grid_height: Height of lens shading tables in grid cells. + * @dmabuf: dmabuf file handle containing the table. + * @ref_transform: Reference transform - unsupported, please pass zero. + * @corner_sampled: Whether the gains are sampled at the corner points + * of the grid cells or in the cell centres. 
+ * @gain_format: Format of the gains (see enum &bcm2835_isp_gain_format). + */ +struct bcm2835_isp_lens_shading { + __u32 enabled; + __u32 grid_cell_size; + __u32 grid_width; + __u32 grid_stride; + __u32 grid_height; + __s32 dmabuf; + __u32 ref_transform; + __u32 corner_sampled; + __u32 gain_format; +}; + +/** + * struct bcm2835_isp_black_level - Sensor black level set with the + * V4L2_CID_USER_BCM2835_ISP_BLACK_LEVEL ctrl. + * + * @enabled: Enable black level. + * @black_level_r: Black level for red channel. + * @black_level_g: Black level for green channels. + * @black_level_b: Black level for blue channel. + */ +struct bcm2835_isp_black_level { + __u32 enabled; + __u16 black_level_r; + __u16 black_level_g; + __u16 black_level_b; + __u8 padding[2]; /* Unused */ +}; + +/** + * struct bcm2835_isp_geq - Green equalisation parameters set with the + * V4L2_CID_USER_BCM2835_ISP_GEQ ctrl. + * + * @enabled: Enable green equalisation. + * @offset: Fixed offset of the green equalisation threshold. + * @slope: Slope of the green equalisation threshold. + */ +struct bcm2835_isp_geq { + __u32 enabled; + __u32 offset; + struct bcm2835_isp_rational slope; +}; + +#define BCM2835_NUM_GAMMA_PTS 33 + +/** + * struct bcm2835_isp_gamma - Gamma parameters set with the + * V4L2_CID_USER_BCM2835_ISP_GAMMA ctrl. + * + * @enabled: Enable gamma adjustment. + * @x: X values of the points defining the gamma curve. + * Values should be scaled to 16 bits. + * @y: Y values of the points defining the gamma curve. + * Values should be scaled to 16 bits. + */ +struct bcm2835_isp_gamma { + __u32 enabled; + __u16 x[BCM2835_NUM_GAMMA_PTS]; + __u16 y[BCM2835_NUM_GAMMA_PTS]; +}; + +/** + * enum bcm2835_isp_cdn_mode - Mode of operation for colour denoise. + * + * @CDN_MODE_FAST: Fast (but lower quality) colour denoise + * algorithm, typically used for video recording. + * @CDN_MODE_HIGH_QUALITY: High quality (but slower) colour denoise + * algorithm, typically used for stills capture. + */ +enum bcm2835_isp_cdn_mode { + CDN_MODE_FAST = 0, + CDN_MODE_HIGH_QUALITY = 1, +}; + +/** + * struct bcm2835_isp_cdn - Colour denoise parameters set with the + * V4L2_CID_USER_BCM2835_ISP_CDN ctrl. + * + * @enabled: Enable colour denoise. + * @mode: Colour denoise operating mode (see enum &bcm2835_isp_cdn_mode) + */ +struct bcm2835_isp_cdn { + __u32 enabled; + __u32 mode; +}; + +/** + * struct bcm2835_isp_denoise - Denoise parameters set with the + * V4L2_CID_USER_BCM2835_ISP_DENOISE ctrl. + * + * @enabled: Enable denoise. + * @constant: Fixed offset of the noise threshold. + * @slope: Slope of the noise threshold. + * @strength: Denoise strength between 0.0 (off) and 1.0 (maximum). + */ +struct bcm2835_isp_denoise { + __u32 enabled; + __u32 constant; + struct bcm2835_isp_rational slope; + struct bcm2835_isp_rational strength; +}; + +/** + * struct bcm2835_isp_sharpen - Sharpen parameters set with the + * V4L2_CID_USER_BCM2835_ISP_SHARPEN ctrl. + * + * @enabled: Enable sharpening. + * @threshold: Threshold at which to start sharpening pixels. + * @strength: Strength with which pixel sharpening increases. + * @limit: Limit to the amount of sharpening applied. + */ +struct bcm2835_isp_sharpen { + __u32 enabled; + struct bcm2835_isp_rational threshold; + struct bcm2835_isp_rational strength; + struct bcm2835_isp_rational limit; +}; + +/** + * enum bcm2835_isp_dpc_mode - defective pixel correction (DPC) strength. + * + * @DPC_MODE_OFF: No DPC. + * @DPC_MODE_NORMAL: Normal DPC. + * @DPC_MODE_STRONG: Strong DPC.
+ */ +enum bcm2835_isp_dpc_mode { + DPC_MODE_OFF = 0, + DPC_MODE_NORMAL = 1, + DPC_MODE_STRONG = 2, +}; + +/** + * struct bcm2835_isp_dpc - Defective pixel correction (DPC) parameters set + * with the V4L2_CID_USER_BCM2835_ISP_DPC ctrl. + * + * @enabled: Enable DPC. + * @strength: DPC strength (see enum &bcm2835_isp_dpc_mode). + */ +struct bcm2835_isp_dpc { + __u32 enabled; + __u32 strength; +}; + +/* + * ISP statistics structures. + * + * The bcm2835_isp_stats structure is generated at the output of the + * statistics node. Note that this does not directly map onto the statistics + * output of the ISP HW. Instead, the MMAL firmware code maps the HW statistics + * to the bcm2835_isp_stats structure. + */ +#define DEFAULT_AWB_REGIONS_X 16 +#define DEFAULT_AWB_REGIONS_Y 12 + +#define NUM_HISTOGRAMS 2 +#define NUM_HISTOGRAM_BINS 128 +#define AWB_REGIONS (DEFAULT_AWB_REGIONS_X * DEFAULT_AWB_REGIONS_Y) +#define FLOATING_REGIONS 16 +#define AGC_REGIONS 16 +#define FOCUS_REGIONS 12 + +/** + * struct bcm2835_isp_stats_hist - Histogram statistics + * + * @r_hist: Red channel histogram. + * @g_hist: Combined green channel histogram. + * @b_hist: Blue channel histogram. + */ +struct bcm2835_isp_stats_hist { + __u32 r_hist[NUM_HISTOGRAM_BINS]; + __u32 g_hist[NUM_HISTOGRAM_BINS]; + __u32 b_hist[NUM_HISTOGRAM_BINS]; +}; + +/** + * struct bcm2835_isp_stats_region - Region sums. + * + * @counted: The number of 2x2 bayer tiles accumulated. + * @notcounted: The number of 2x2 bayer tiles not accumulated. + * @r_sum: Total sum of counted pixels in the red channel for a region. + * @g_sum: Total sum of counted pixels in the green channel for a region. + * @b_sum: Total sum of counted pixels in the blue channel for a region. + */ +struct bcm2835_isp_stats_region { + __u32 counted; + __u32 notcounted; + __u64 r_sum; + __u64 g_sum; + __u64 b_sum; +}; + +/** + * struct bcm2835_isp_stats_focus - Focus statistics. + * + * @contrast_val: Focus measure - accumulated output of the focus filter. + * In the first dimension, index [0] counts pixels below a + * preset threshold, and index [1] counts pixels above the + * threshold. In the second dimension, index [0] uses the + * first predefined filter, and index [1] uses the second + * predefined filter. + * @contrast_val_num: The number of counted pixels in the above accumulation. + */ +struct bcm2835_isp_stats_focus { + __u64 contrast_val[2][2]; + __u32 contrast_val_num[2][2]; +}; + +/** + * struct bcm2835_isp_stats - ISP statistics. + * + * @version: Version of the bcm2835_isp_stats structure. + * @size: Size of the bcm2835_isp_stats structure. + * @hist: Histogram statistics for the entire image. + * @awb_stats: Statistics for the regions defined for AWB calculations. + * @floating_stats: Statistics for arbitrarily placed (floating) regions. + * @agc_stats: Statistics for the regions defined for AGC calculations. + * @focus_stats: Focus filter statistics for the focus regions. 
+ */ +struct bcm2835_isp_stats { + __u32 version; + __u32 size; + struct bcm2835_isp_stats_hist hist[NUM_HISTOGRAMS]; + struct bcm2835_isp_stats_region awb_stats[AWB_REGIONS]; + struct bcm2835_isp_stats_region floating_stats[FLOATING_REGIONS]; + struct bcm2835_isp_stats_region agc_stats[AGC_REGIONS]; + struct bcm2835_isp_stats_focus focus_stats[FOCUS_REGIONS]; +}; + +#endif /* __BCM2835_ISP_H_ */ diff --git a/spider-cam/libcamera/include/linux/dma-buf.h b/spider-cam/libcamera/include/linux/dma-buf.h new file mode 100644 index 0000000..5a6fda6 --- /dev/null +++ b/spider-cam/libcamera/include/linux/dma-buf.h @@ -0,0 +1,182 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Framework for buffer objects that can be shared across devices/subsystems. + * + * Copyright(C) 2015 Intel Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef _DMA_BUF_UAPI_H_ +#define _DMA_BUF_UAPI_H_ + +#include <linux/types.h> + +/** + * struct dma_buf_sync - Synchronize with CPU access. + * + * When a DMA buffer is accessed from the CPU via mmap, it is not always + * possible to guarantee coherency between the CPU-visible map and underlying + * memory. To manage coherency, DMA_BUF_IOCTL_SYNC must be used to bracket + * any CPU access to give the kernel the chance to shuffle memory around if + * needed. + * + * Prior to accessing the map, the client must call DMA_BUF_IOCTL_SYNC + * with DMA_BUF_SYNC_START and the appropriate read/write flags. Once the + * access is complete, the client should call DMA_BUF_IOCTL_SYNC with + * DMA_BUF_SYNC_END and the same read/write flags. + * + * The synchronization provided via DMA_BUF_IOCTL_SYNC only provides cache + * coherency. It does not prevent other processes or devices from + * accessing the memory at the same time. If synchronization with a GPU or + * other device driver is required, it is the client's responsibility to + * wait for buffer to be ready for reading or writing before calling this + * ioctl with DMA_BUF_SYNC_START. Likewise, the client must ensure that + * follow-up work is not submitted to GPU or other device driver until + * after this ioctl has been called with DMA_BUF_SYNC_END. + * + * If the driver or API with which the client is interacting uses implicit + * synchronization, waiting for prior work to complete can be done via + * poll() on the DMA buffer file descriptor. If the driver or API requires + * explicit synchronization, the client may have to wait on a sync_file or + * other synchronization primitive outside the scope of the DMA buffer API. + */ +struct dma_buf_sync { + /** + * @flags: Set of access flags + * + * DMA_BUF_SYNC_START: + * Indicates the start of a map access session. + * + * DMA_BUF_SYNC_END: + * Indicates the end of a map access session. + * + * DMA_BUF_SYNC_READ: + * Indicates that the mapped DMA buffer will be read by the + * client via the CPU map. + * + * DMA_BUF_SYNC_WRITE: + * Indicates that the mapped DMA buffer will be written by the + * client via the CPU map.
+ * + * DMA_BUF_SYNC_RW: + * An alias for DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE. + */ + __u64 flags; +}; + +#define DMA_BUF_SYNC_READ (1 << 0) +#define DMA_BUF_SYNC_WRITE (2 << 0) +#define DMA_BUF_SYNC_RW (DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE) +#define DMA_BUF_SYNC_START (0 << 2) +#define DMA_BUF_SYNC_END (1 << 2) +#define DMA_BUF_SYNC_VALID_FLAGS_MASK \ + (DMA_BUF_SYNC_RW | DMA_BUF_SYNC_END) + +#define DMA_BUF_NAME_LEN 32 + +/** + * struct dma_buf_export_sync_file - Get a sync_file from a dma-buf + * + * Userspace can perform a DMA_BUF_IOCTL_EXPORT_SYNC_FILE to retrieve the + * current set of fences on a dma-buf file descriptor as a sync_file. CPU + * waits via poll() or other driver-specific mechanisms typically wait on + * whatever fences are on the dma-buf at the time the wait begins. This + * is similar except that it takes a snapshot of the current fences on the + * dma-buf for waiting later instead of waiting immediately. This is + * useful for modern graphics APIs such as Vulkan which assume an explicit + * synchronization model but still need to inter-operate with dma-buf. + * + * The intended usage pattern is the following: + * + * 1. Export a sync_file with flags corresponding to the expected GPU usage + * via DMA_BUF_IOCTL_EXPORT_SYNC_FILE. + * + * 2. Submit rendering work which uses the dma-buf. The work should wait on + * the exported sync file before rendering and produce another sync_file + * when complete. + * + * 3. Import the rendering-complete sync_file into the dma-buf with flags + * corresponding to the GPU usage via DMA_BUF_IOCTL_IMPORT_SYNC_FILE. + * + * Unlike doing implicit synchronization via a GPU kernel driver's exec ioctl, + * the above is not a single atomic operation. If userspace wants to ensure + * ordering via these fences, it is the responsibility of userspace to use + * locks or other mechanisms to ensure that no other context adds fences or + * submits work between steps 1 and 3 above. + */ +struct dma_buf_export_sync_file { + /** + * @flags: Read/write flags + * + * Must be DMA_BUF_SYNC_READ, DMA_BUF_SYNC_WRITE, or both. + * + * If DMA_BUF_SYNC_READ is set and DMA_BUF_SYNC_WRITE is not set, + * the returned sync file waits on any writers of the dma-buf to + * complete. Waiting on the returned sync file is equivalent to + * poll() with POLLIN. + * + * If DMA_BUF_SYNC_WRITE is set, the returned sync file waits on + * any users of the dma-buf (read or write) to complete. Waiting + * on the returned sync file is equivalent to poll() with POLLOUT. + * If both DMA_BUF_SYNC_WRITE and DMA_BUF_SYNC_READ are set, this + * is equivalent to just DMA_BUF_SYNC_WRITE. + */ + __u32 flags; + /** @fd: Returned sync file descriptor */ + __s32 fd; +}; + +/** + * struct dma_buf_import_sync_file - Insert a sync_file into a dma-buf + * + * Userspace can perform a DMA_BUF_IOCTL_IMPORT_SYNC_FILE to insert a + * sync_file into a dma-buf for the purposes of implicit synchronization + * with other dma-buf consumers. This allows clients using explicitly + * synchronized APIs such as Vulkan to inter-op with dma-buf consumers + * which expect implicit synchronization such as OpenGL or most media + * drivers/video. + */ +struct dma_buf_import_sync_file { + /** + * @flags: Read/write flags + * + * Must be DMA_BUF_SYNC_READ, DMA_BUF_SYNC_WRITE, or both. + * + * If DMA_BUF_SYNC_READ is set and DMA_BUF_SYNC_WRITE is not set, + * this inserts the sync_file as a read-only fence.
Any subsequent + * implicitly synchronized writes to this dma-buf will wait on this + * fence but reads will not. + * + * If DMA_BUF_SYNC_WRITE is set, this inserts the sync_file as a + * write fence. All subsequent implicitly synchronized access to + * this dma-buf will wait on this fence. + */ + __u32 flags; + /** @fd: Sync file descriptor */ + __s32 fd; +}; + +#define DMA_BUF_BASE 'b' +#define DMA_BUF_IOCTL_SYNC _IOW(DMA_BUF_BASE, 0, struct dma_buf_sync) + +/* 32/64bitness of this uapi was botched in android, there's no difference + * between them in actual uapi, they're just different numbers. + */ +#define DMA_BUF_SET_NAME _IOW(DMA_BUF_BASE, 1, const char *) +#define DMA_BUF_SET_NAME_A _IOW(DMA_BUF_BASE, 1, __u32) +#define DMA_BUF_SET_NAME_B _IOW(DMA_BUF_BASE, 1, __u64) +#define DMA_BUF_IOCTL_EXPORT_SYNC_FILE _IOWR(DMA_BUF_BASE, 2, struct dma_buf_export_sync_file) +#define DMA_BUF_IOCTL_IMPORT_SYNC_FILE _IOW(DMA_BUF_BASE, 3, struct dma_buf_import_sync_file) + +#endif diff --git a/spider-cam/libcamera/include/linux/dma-heap.h b/spider-cam/libcamera/include/linux/dma-heap.h new file mode 100644 index 0000000..96b90cf --- /dev/null +++ b/spider-cam/libcamera/include/linux/dma-heap.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * DMABUF Heaps Userspace API + * + * Copyright (C) 2011 Google, Inc. + * Copyright (C) 2019 Linaro Ltd. + */ +#ifndef _LINUX_DMABUF_POOL_H +#define _LINUX_DMABUF_POOL_H + +#include <linux/ioctl.h> +#include <linux/types.h> + +/** + * DOC: DMABUF Heaps Userspace API + */ + +/* Valid FD_FLAGS are O_CLOEXEC, O_RDONLY, O_WRONLY, O_RDWR */ +#define DMA_HEAP_VALID_FD_FLAGS (O_CLOEXEC | O_ACCMODE) + +/* Currently no heap flags */ +#define DMA_HEAP_VALID_HEAP_FLAGS (0) + +/** + * struct dma_heap_allocation_data - metadata passed from userspace for + * allocations + * @len: size of the allocation + * @fd: will be populated with a fd which provides the + * handle to the allocated dma-buf + * @fd_flags: file descriptor flags used when allocating + * @heap_flags: flags passed to heap + * + * Provided by userspace as an argument to the ioctl + */ +struct dma_heap_allocation_data { + __u64 len; + __u32 fd; + __u32 fd_flags; + __u64 heap_flags; +}; + +#define DMA_HEAP_IOC_MAGIC 'H' + +/** + * DOC: DMA_HEAP_IOCTL_ALLOC - allocate memory from pool + * + * Takes a dma_heap_allocation_data struct and returns it with the fd field + * populated with the dmabuf handle of the allocation. + */ +#define DMA_HEAP_IOCTL_ALLOC _IOWR(DMA_HEAP_IOC_MAGIC, 0x0,\ + struct dma_heap_allocation_data) + +#endif /* _LINUX_DMABUF_POOL_H */ diff --git a/spider-cam/libcamera/include/linux/drm_fourcc.h b/spider-cam/libcamera/include/linux/drm_fourcc.h new file mode 100644 index 0000000..d8e9994 --- /dev/null +++ b/spider-cam/libcamera/include/linux/drm_fourcc.h @@ -0,0 +1,1681 @@ +/* + * Copyright 2011 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software.
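Between them, the dma-heap.h and dma-buf.h UAPI headers just imported cover a full userspace round trip: allocate a dma-buf from a heap, mmap it, and bracket CPU access with DMA_BUF_IOCTL_SYNC as the dma_buf_sync kernel-doc requires. A hedged sketch — the heap node name /dev/dma_heap/linux,cma is an assumption that varies by platform, and error handling is pared down:

```cpp
// Sketch: DMA_HEAP_IOCTL_ALLOC + DMA_BUF_IOCTL_SYNC round trip.
// The heap device name below is an assumption; it differs per platform.
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>

int main()
{
	int heap = open("/dev/dma_heap/linux,cma", O_RDWR | O_CLOEXEC);
	if (heap < 0)
		return 1;

	struct dma_heap_allocation_data alloc = {};
	alloc.len = 4096;
	alloc.fd_flags = O_RDWR | O_CLOEXEC;
	if (ioctl(heap, DMA_HEAP_IOCTL_ALLOC, &alloc) < 0)
		return 1;

	size_t len = alloc.len;
	void *map = mmap(nullptr, len, PROT_READ | PROT_WRITE,
			 MAP_SHARED, alloc.fd, 0);
	if (map == MAP_FAILED)
		return 1;

	// CPU writes must be bracketed by SYNC_START/SYNC_END with the
	// matching read/write flags, per the dma_buf_sync kernel-doc above.
	struct dma_buf_sync sync = {};
	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
	ioctl(alloc.fd, DMA_BUF_IOCTL_SYNC, &sync);
	memset(map, 0, len);
	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
	ioctl(alloc.fd, DMA_BUF_IOCTL_SYNC, &sync);

	munmap(map, len);
	close(alloc.fd);
	close(heap);
	return 0;
}
```

The returned fd is an ordinary dma-buf, so it can be handed to V4L2 or DRM as-is; only the cache-coherency bracketing is the caller's job.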
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef DRM_FOURCC_H +#define DRM_FOURCC_H + +#include "drm.h" + +#if defined(__cplusplus) +extern "C" { +#endif + +/** + * DOC: overview + * + * In the DRM subsystem, framebuffer pixel formats are described using the + * fourcc codes defined in `include/uapi/drm/drm_fourcc.h`. In addition to the + * fourcc code, a Format Modifier may optionally be provided, in order to + * further describe the buffer's format - for example tiling or compression. + * + * Format Modifiers + * ---------------- + * + * Format modifiers are used in conjunction with a fourcc code, forming a + * unique fourcc:modifier pair. This format:modifier pair must fully define the + * format and data layout of the buffer, and should be the only way to describe + * that particular buffer. + * + * Having multiple fourcc:modifier pairs which describe the same layout should + * be avoided, as such aliases run the risk of different drivers exposing + * different names for the same data format, forcing userspace to understand + * that they are aliases. + * + * Format modifiers may change any property of the buffer, including the number + * of planes and/or the required allocation size. Format modifiers are + * vendor-namespaced, and as such the relationship between a fourcc code and a + * modifier is specific to the modifier being used. For example, some modifiers + * may preserve meaning - such as number of planes - from the fourcc code, + * whereas others may not. + * + * Modifiers must uniquely encode buffer layout. In other words, a buffer must + * match only a single modifier. A modifier must not be a subset of layouts of + * another modifier. For instance, it's incorrect to encode pitch alignment in + * a modifier: a buffer may match a 64-pixel aligned modifier and a 32-pixel + * aligned modifier. That said, modifiers can have implicit minimal + * requirements. + * + * For modifiers where the combination of fourcc code and modifier can alias, + * a canonical pair needs to be defined and used by all drivers. Preferred + * combinations are also encouraged where all combinations might lead to + * confusion and unnecessarily reduced interoperability. An example for the + * latter is AFBC, where the ABGR layouts are preferred over ARGB layouts. + * + * There are two kinds of modifier users: + * + * - Kernel and user-space drivers: for drivers it's important that modifiers + * don't alias, otherwise two drivers might support the same format but use + * different aliases, preventing them from sharing buffers in an efficient + * format. + * - Higher-level programs interfacing with KMS/GBM/EGL/Vulkan/etc: these users + * see modifiers as opaque tokens they can check for equality and intersect. + * These users mustn't need to know to reason about the modifier value + * (i.e. they are not expected to extract information out of the modifier). + * + * Vendors should document their modifier usage in as much detail as + * possible, to ensure maximum compatibility across devices, drivers and + * applications. 
+ * + * The authoritative list of format modifier codes is found in + * `include/uapi/drm/drm_fourcc.h` + * + * Open Source User Waiver + * ----------------------- + * + * Because this is the authoritative source for pixel formats and modifiers + * referenced by GL, Vulkan extensions and other standards and hence used both + * by open source and closed source driver stacks, the usual requirement for an + * upstream in-kernel or open source userspace user does not apply. + * + * To ensure, as much as feasible, compatibility across stacks and avoid + * confusion with incompatible enumerations stakeholders for all relevant driver + * stacks should approve additions. + */ + +#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \ + ((__u32)(c) << 16) | ((__u32)(d) << 24)) + +#define DRM_FORMAT_BIG_ENDIAN (1U<<31) /* format is big endian instead of little endian */ + +/* Reserve 0 for the invalid format specifier */ +#define DRM_FORMAT_INVALID 0 + +/* color index */ +#define DRM_FORMAT_C1 fourcc_code('C', '1', ' ', ' ') /* [7:0] C0:C1:C2:C3:C4:C5:C6:C7 1:1:1:1:1:1:1:1 eight pixels/byte */ +#define DRM_FORMAT_C2 fourcc_code('C', '2', ' ', ' ') /* [7:0] C0:C1:C2:C3 2:2:2:2 four pixels/byte */ +#define DRM_FORMAT_C4 fourcc_code('C', '4', ' ', ' ') /* [7:0] C0:C1 4:4 two pixels/byte */ +#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */ + +/* 1 bpp Darkness (inverse relationship between channel value and brightness) */ +#define DRM_FORMAT_D1 fourcc_code('D', '1', ' ', ' ') /* [7:0] D0:D1:D2:D3:D4:D5:D6:D7 1:1:1:1:1:1:1:1 eight pixels/byte */ + +/* 2 bpp Darkness (inverse relationship between channel value and brightness) */ +#define DRM_FORMAT_D2 fourcc_code('D', '2', ' ', ' ') /* [7:0] D0:D1:D2:D3 2:2:2:2 four pixels/byte */ + +/* 4 bpp Darkness (inverse relationship between channel value and brightness) */ +#define DRM_FORMAT_D4 fourcc_code('D', '4', ' ', ' ') /* [7:0] D0:D1 4:4 two pixels/byte */ + +/* 8 bpp Darkness (inverse relationship between channel value and brightness) */ +#define DRM_FORMAT_D8 fourcc_code('D', '8', ' ', ' ') /* [7:0] D */ + +/* 1 bpp Red (direct relationship between channel value and brightness) */ +#define DRM_FORMAT_R1 fourcc_code('R', '1', ' ', ' ') /* [7:0] R0:R1:R2:R3:R4:R5:R6:R7 1:1:1:1:1:1:1:1 eight pixels/byte */ + +/* 2 bpp Red (direct relationship between channel value and brightness) */ +#define DRM_FORMAT_R2 fourcc_code('R', '2', ' ', ' ') /* [7:0] R0:R1:R2:R3 2:2:2:2 four pixels/byte */ + +/* 4 bpp Red (direct relationship between channel value and brightness) */ +#define DRM_FORMAT_R4 fourcc_code('R', '4', ' ', ' ') /* [7:0] R0:R1 4:4 two pixels/byte */ + +/* 8 bpp Red (direct relationship between channel value and brightness) */ +#define DRM_FORMAT_R8 fourcc_code('R', '8', ' ', ' ') /* [7:0] R */ + +/* 10 bpp Red (direct relationship between channel value and brightness) */ +#define DRM_FORMAT_R10 fourcc_code('R', '1', '0', ' ') /* [15:0] x:R 6:10 little endian */ + +/* 12 bpp Red (direct relationship between channel value and brightness) */ +#define DRM_FORMAT_R12 fourcc_code('R', '1', '2', ' ') /* [15:0] x:R 4:12 little endian */ + +/* 16 bpp Red (direct relationship between channel value and brightness) */ +#define DRM_FORMAT_R16 fourcc_code('R', '1', '6', ' ') /* [15:0] R little endian */ + +/* 16 bpp RG */ +#define DRM_FORMAT_RG88 fourcc_code('R', 'G', '8', '8') /* [15:0] R:G 8:8 little endian */ +#define DRM_FORMAT_GR88 fourcc_code('G', 'R', '8', '8') /* [15:0] G:R 8:8 little endian */ + +/* 32 bpp RG */ +#define 
DRM_FORMAT_RG1616 fourcc_code('R', 'G', '3', '2') /* [31:0] R:G 16:16 little endian */ +#define DRM_FORMAT_GR1616 fourcc_code('G', 'R', '3', '2') /* [31:0] G:R 16:16 little endian */ + +/* 8 bpp RGB */ +#define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */ +#define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */ + +/* 16 bpp RGB */ +#define DRM_FORMAT_XRGB4444 fourcc_code('X', 'R', '1', '2') /* [15:0] x:R:G:B 4:4:4:4 little endian */ +#define DRM_FORMAT_XBGR4444 fourcc_code('X', 'B', '1', '2') /* [15:0] x:B:G:R 4:4:4:4 little endian */ +#define DRM_FORMAT_RGBX4444 fourcc_code('R', 'X', '1', '2') /* [15:0] R:G:B:x 4:4:4:4 little endian */ +#define DRM_FORMAT_BGRX4444 fourcc_code('B', 'X', '1', '2') /* [15:0] B:G:R:x 4:4:4:4 little endian */ + +#define DRM_FORMAT_ARGB4444 fourcc_code('A', 'R', '1', '2') /* [15:0] A:R:G:B 4:4:4:4 little endian */ +#define DRM_FORMAT_ABGR4444 fourcc_code('A', 'B', '1', '2') /* [15:0] A:B:G:R 4:4:4:4 little endian */ +#define DRM_FORMAT_RGBA4444 fourcc_code('R', 'A', '1', '2') /* [15:0] R:G:B:A 4:4:4:4 little endian */ +#define DRM_FORMAT_BGRA4444 fourcc_code('B', 'A', '1', '2') /* [15:0] B:G:R:A 4:4:4:4 little endian */ + +#define DRM_FORMAT_XRGB1555 fourcc_code('X', 'R', '1', '5') /* [15:0] x:R:G:B 1:5:5:5 little endian */ +#define DRM_FORMAT_XBGR1555 fourcc_code('X', 'B', '1', '5') /* [15:0] x:B:G:R 1:5:5:5 little endian */ +#define DRM_FORMAT_RGBX5551 fourcc_code('R', 'X', '1', '5') /* [15:0] R:G:B:x 5:5:5:1 little endian */ +#define DRM_FORMAT_BGRX5551 fourcc_code('B', 'X', '1', '5') /* [15:0] B:G:R:x 5:5:5:1 little endian */ + +#define DRM_FORMAT_ARGB1555 fourcc_code('A', 'R', '1', '5') /* [15:0] A:R:G:B 1:5:5:5 little endian */ +#define DRM_FORMAT_ABGR1555 fourcc_code('A', 'B', '1', '5') /* [15:0] A:B:G:R 1:5:5:5 little endian */ +#define DRM_FORMAT_RGBA5551 fourcc_code('R', 'A', '1', '5') /* [15:0] R:G:B:A 5:5:5:1 little endian */ +#define DRM_FORMAT_BGRA5551 fourcc_code('B', 'A', '1', '5') /* [15:0] B:G:R:A 5:5:5:1 little endian */ + +#define DRM_FORMAT_RGB565 fourcc_code('R', 'G', '1', '6') /* [15:0] R:G:B 5:6:5 little endian */ +#define DRM_FORMAT_BGR565 fourcc_code('B', 'G', '1', '6') /* [15:0] B:G:R 5:6:5 little endian */ + +/* 24 bpp RGB */ +#define DRM_FORMAT_RGB888 fourcc_code('R', 'G', '2', '4') /* [23:0] R:G:B little endian */ +#define DRM_FORMAT_BGR888 fourcc_code('B', 'G', '2', '4') /* [23:0] B:G:R little endian */ + +/* 32 bpp RGB */ +#define DRM_FORMAT_XRGB8888 fourcc_code('X', 'R', '2', '4') /* [31:0] x:R:G:B 8:8:8:8 little endian */ +#define DRM_FORMAT_XBGR8888 fourcc_code('X', 'B', '2', '4') /* [31:0] x:B:G:R 8:8:8:8 little endian */ +#define DRM_FORMAT_RGBX8888 fourcc_code('R', 'X', '2', '4') /* [31:0] R:G:B:x 8:8:8:8 little endian */ +#define DRM_FORMAT_BGRX8888 fourcc_code('B', 'X', '2', '4') /* [31:0] B:G:R:x 8:8:8:8 little endian */ + +#define DRM_FORMAT_ARGB8888 fourcc_code('A', 'R', '2', '4') /* [31:0] A:R:G:B 8:8:8:8 little endian */ +#define DRM_FORMAT_ABGR8888 fourcc_code('A', 'B', '2', '4') /* [31:0] A:B:G:R 8:8:8:8 little endian */ +#define DRM_FORMAT_RGBA8888 fourcc_code('R', 'A', '2', '4') /* [31:0] R:G:B:A 8:8:8:8 little endian */ +#define DRM_FORMAT_BGRA8888 fourcc_code('B', 'A', '2', '4') /* [31:0] B:G:R:A 8:8:8:8 little endian */ + +#define DRM_FORMAT_XRGB2101010 fourcc_code('X', 'R', '3', '0') /* [31:0] x:R:G:B 2:10:10:10 little endian */ +#define DRM_FORMAT_XBGR2101010 fourcc_code('X', 'B', '3', '0') /* [31:0] x:B:G:R 2:10:10:10 little endian */ +#define 
DRM_FORMAT_RGBX1010102 fourcc_code('R', 'X', '3', '0') /* [31:0] R:G:B:x 10:10:10:2 little endian */ +#define DRM_FORMAT_BGRX1010102 fourcc_code('B', 'X', '3', '0') /* [31:0] B:G:R:x 10:10:10:2 little endian */ + +#define DRM_FORMAT_ARGB2101010 fourcc_code('A', 'R', '3', '0') /* [31:0] A:R:G:B 2:10:10:10 little endian */ +#define DRM_FORMAT_ABGR2101010 fourcc_code('A', 'B', '3', '0') /* [31:0] A:B:G:R 2:10:10:10 little endian */ +#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */ +#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */ + +/* 48 bpp RGB */ +#define DRM_FORMAT_RGB161616 fourcc_code('R', 'G', '4', '8') /* [47:0] R:G:B 16:16:16 little endian */ +#define DRM_FORMAT_BGR161616 fourcc_code('B', 'G', '4', '8') /* [47:0] B:G:R 16:16:16 little endian */ + +/* 64 bpp RGB */ +#define DRM_FORMAT_XRGB16161616 fourcc_code('X', 'R', '4', '8') /* [63:0] x:R:G:B 16:16:16:16 little endian */ +#define DRM_FORMAT_XBGR16161616 fourcc_code('X', 'B', '4', '8') /* [63:0] x:B:G:R 16:16:16:16 little endian */ + +#define DRM_FORMAT_ARGB16161616 fourcc_code('A', 'R', '4', '8') /* [63:0] A:R:G:B 16:16:16:16 little endian */ +#define DRM_FORMAT_ABGR16161616 fourcc_code('A', 'B', '4', '8') /* [63:0] A:B:G:R 16:16:16:16 little endian */ + +/* + * Floating point 64bpp RGB + * IEEE 754-2008 binary16 half-precision float + * [15:0] sign:exponent:mantissa 1:5:10 + */ +#define DRM_FORMAT_XRGB16161616F fourcc_code('X', 'R', '4', 'H') /* [63:0] x:R:G:B 16:16:16:16 little endian */ +#define DRM_FORMAT_XBGR16161616F fourcc_code('X', 'B', '4', 'H') /* [63:0] x:B:G:R 16:16:16:16 little endian */ + +#define DRM_FORMAT_ARGB16161616F fourcc_code('A', 'R', '4', 'H') /* [63:0] A:R:G:B 16:16:16:16 little endian */ +#define DRM_FORMAT_ABGR16161616F fourcc_code('A', 'B', '4', 'H') /* [63:0] A:B:G:R 16:16:16:16 little endian */ + +/* + * RGBA format with 10-bit components packed in 64-bit per pixel, with 6 bits + * of unused padding per component: + */ +#define DRM_FORMAT_AXBXGXRX106106106106 fourcc_code('A', 'B', '1', '0') /* [63:0] A:x:B:x:G:x:R:x 10:6:10:6:10:6:10:6 little endian */ + +/* packed YCbCr */ +#define DRM_FORMAT_YUYV fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */ +#define DRM_FORMAT_YVYU fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */ +#define DRM_FORMAT_UYVY fourcc_code('U', 'Y', 'V', 'Y') /* [31:0] Y1:Cr0:Y0:Cb0 8:8:8:8 little endian */ +#define DRM_FORMAT_VYUY fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */ + +#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */ +#define DRM_FORMAT_AVUY8888 fourcc_code('A', 'V', 'U', 'Y') /* [31:0] A:Cr:Cb:Y 8:8:8:8 little endian */ +#define DRM_FORMAT_XYUV8888 fourcc_code('X', 'Y', 'U', 'V') /* [31:0] X:Y:Cb:Cr 8:8:8:8 little endian */ +#define DRM_FORMAT_XVUY8888 fourcc_code('X', 'V', 'U', 'Y') /* [31:0] X:Cr:Cb:Y 8:8:8:8 little endian */ +#define DRM_FORMAT_VUY888 fourcc_code('V', 'U', '2', '4') /* [23:0] Cr:Cb:Y 8:8:8 little endian */ +#define DRM_FORMAT_VUY101010 fourcc_code('V', 'U', '3', '0') /* Y followed by U then V, 10:10:10. 
Non-linear modifier only */ + +/* + * packed Y2xx indicate for each component, xx valid data occupy msb + * 16-xx padding occupy lsb + */ +#define DRM_FORMAT_Y210 fourcc_code('Y', '2', '1', '0') /* [63:0] Cr0:0:Y1:0:Cb0:0:Y0:0 10:6:10:6:10:6:10:6 little endian per 2 Y pixels */ +#define DRM_FORMAT_Y212 fourcc_code('Y', '2', '1', '2') /* [63:0] Cr0:0:Y1:0:Cb0:0:Y0:0 12:4:12:4:12:4:12:4 little endian per 2 Y pixels */ +#define DRM_FORMAT_Y216 fourcc_code('Y', '2', '1', '6') /* [63:0] Cr0:Y1:Cb0:Y0 16:16:16:16 little endian per 2 Y pixels */ + +/* + * packed Y4xx indicate for each component, xx valid data occupy msb + * 16-xx padding occupy lsb except Y410 + */ +#define DRM_FORMAT_Y410 fourcc_code('Y', '4', '1', '0') /* [31:0] A:Cr:Y:Cb 2:10:10:10 little endian */ +#define DRM_FORMAT_Y412 fourcc_code('Y', '4', '1', '2') /* [63:0] A:0:Cr:0:Y:0:Cb:0 12:4:12:4:12:4:12:4 little endian */ +#define DRM_FORMAT_Y416 fourcc_code('Y', '4', '1', '6') /* [63:0] A:Cr:Y:Cb 16:16:16:16 little endian */ + +#define DRM_FORMAT_XVYU2101010 fourcc_code('X', 'V', '3', '0') /* [31:0] X:Cr:Y:Cb 2:10:10:10 little endian */ +#define DRM_FORMAT_XVYU12_16161616 fourcc_code('X', 'V', '3', '6') /* [63:0] X:0:Cr:0:Y:0:Cb:0 12:4:12:4:12:4:12:4 little endian */ +#define DRM_FORMAT_XVYU16161616 fourcc_code('X', 'V', '4', '8') /* [63:0] X:Cr:Y:Cb 16:16:16:16 little endian */ + +/* + * packed YCbCr420 2x2 tiled formats + * first 64 bits will contain Y,Cb,Cr components for a 2x2 tile + */ +/* [63:0] A3:A2:Y3:0:Cr0:0:Y2:0:A1:A0:Y1:0:Cb0:0:Y0:0 1:1:8:2:8:2:8:2:1:1:8:2:8:2:8:2 little endian */ +#define DRM_FORMAT_Y0L0 fourcc_code('Y', '0', 'L', '0') +/* [63:0] X3:X2:Y3:0:Cr0:0:Y2:0:X1:X0:Y1:0:Cb0:0:Y0:0 1:1:8:2:8:2:8:2:1:1:8:2:8:2:8:2 little endian */ +#define DRM_FORMAT_X0L0 fourcc_code('X', '0', 'L', '0') + +/* [63:0] A3:A2:Y3:Cr0:Y2:A1:A0:Y1:Cb0:Y0 1:1:10:10:10:1:1:10:10:10 little endian */ +#define DRM_FORMAT_Y0L2 fourcc_code('Y', '0', 'L', '2') +/* [63:0] X3:X2:Y3:Cr0:Y2:X1:X0:Y1:Cb0:Y0 1:1:10:10:10:1:1:10:10:10 little endian */ +#define DRM_FORMAT_X0L2 fourcc_code('X', '0', 'L', '2') + +/* + * 1-plane YUV 4:2:0 + * In these formats, the component ordering is specified (Y, followed by U + * then V), but the exact Linear layout is undefined. + * These formats can only be used with a non-Linear modifier. 
+ */ +#define DRM_FORMAT_YUV420_8BIT fourcc_code('Y', 'U', '0', '8') +#define DRM_FORMAT_YUV420_10BIT fourcc_code('Y', 'U', '1', '0') + +/* + * 2 plane RGB + A + * index 0 = RGB plane, same format as the corresponding non _A8 format has + * index 1 = A plane, [7:0] A + */ +#define DRM_FORMAT_XRGB8888_A8 fourcc_code('X', 'R', 'A', '8') +#define DRM_FORMAT_XBGR8888_A8 fourcc_code('X', 'B', 'A', '8') +#define DRM_FORMAT_RGBX8888_A8 fourcc_code('R', 'X', 'A', '8') +#define DRM_FORMAT_BGRX8888_A8 fourcc_code('B', 'X', 'A', '8') +#define DRM_FORMAT_RGB888_A8 fourcc_code('R', '8', 'A', '8') +#define DRM_FORMAT_BGR888_A8 fourcc_code('B', '8', 'A', '8') +#define DRM_FORMAT_RGB565_A8 fourcc_code('R', '5', 'A', '8') +#define DRM_FORMAT_BGR565_A8 fourcc_code('B', '5', 'A', '8') + +/* + * 2 plane YCbCr + * index 0 = Y plane, [7:0] Y + * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian + * or + * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian + */ +#define DRM_FORMAT_NV12 fourcc_code('N', 'V', '1', '2') /* 2x2 subsampled Cr:Cb plane */ +#define DRM_FORMAT_NV21 fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */ +#define DRM_FORMAT_NV16 fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */ +#define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */ +#define DRM_FORMAT_NV24 fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */ +#define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */ +/* + * 2 plane YCbCr + * index 0 = Y plane, [39:0] Y3:Y2:Y1:Y0 little endian + * index 1 = Cr:Cb plane, [39:0] Cr1:Cb1:Cr0:Cb0 little endian + */ +#define DRM_FORMAT_NV15 fourcc_code('N', 'V', '1', '5') /* 2x2 subsampled Cr:Cb plane */ +#define DRM_FORMAT_NV20 fourcc_code('N', 'V', '2', '0') /* 2x1 subsampled Cr:Cb plane */ +#define DRM_FORMAT_NV30 fourcc_code('N', 'V', '3', '0') /* non-subsampled Cr:Cb plane */ + +/* + * 2 plane YCbCr MSB aligned + * index 0 = Y plane, [15:0] Y:x [10:6] little endian + * index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [10:6:10:6] little endian + */ +#define DRM_FORMAT_P210 fourcc_code('P', '2', '1', '0') /* 2x1 subsampled Cr:Cb plane, 10 bit per channel */ + +/* + * 2 plane YCbCr MSB aligned + * index 0 = Y plane, [15:0] Y:x [10:6] little endian + * index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [10:6:10:6] little endian + */ +#define DRM_FORMAT_P010 fourcc_code('P', '0', '1', '0') /* 2x2 subsampled Cr:Cb plane 10 bits per channel */ + +/* + * 2 plane YCbCr MSB aligned + * index 0 = Y plane, [15:0] Y:x [12:4] little endian + * index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [12:4:12:4] little endian + */ +#define DRM_FORMAT_P012 fourcc_code('P', '0', '1', '2') /* 2x2 subsampled Cr:Cb plane 12 bits per channel */ + +/* + * 2 plane YCbCr MSB aligned + * index 0 = Y plane, [15:0] Y little endian + * index 1 = Cr:Cb plane, [31:0] Cr:Cb [16:16] little endian + */ +#define DRM_FORMAT_P016 fourcc_code('P', '0', '1', '6') /* 2x2 subsampled Cr:Cb plane 16 bits per channel */ + +/* 2 plane YCbCr420. + * 3 10 bit components and 2 padding bits packed into 4 bytes. 
+ * index 0 = Y plane, [31:0] x:Y2:Y1:Y0 2:10:10:10 little endian + * index 1 = Cr:Cb plane, [63:0] x:Cr2:Cb2:Cr1:x:Cb1:Cr0:Cb0 [2:10:10:10:2:10:10:10] little endian + */ +#define DRM_FORMAT_P030 fourcc_code('P', '0', '3', '0') /* 2x2 subsampled Cr:Cb plane 10 bits per channel packed */ + +/* 3 plane non-subsampled (444) YCbCr + * 16 bits per component, but only 10 bits are used and 6 bits are padded + * index 0: Y plane, [15:0] Y:x [10:6] little endian + * index 1: Cb plane, [15:0] Cb:x [10:6] little endian + * index 2: Cr plane, [15:0] Cr:x [10:6] little endian + */ +#define DRM_FORMAT_Q410 fourcc_code('Q', '4', '1', '0') + +/* 3 plane non-subsampled (444) YCrCb + * 16 bits per component, but only 10 bits are used and 6 bits are padded + * index 0: Y plane, [15:0] Y:x [10:6] little endian + * index 1: Cr plane, [15:0] Cr:x [10:6] little endian + * index 2: Cb plane, [15:0] Cb:x [10:6] little endian + */ +#define DRM_FORMAT_Q401 fourcc_code('Q', '4', '0', '1') + +/* + * 3 plane YCbCr + * index 0: Y plane, [7:0] Y + * index 1: Cb plane, [7:0] Cb + * index 2: Cr plane, [7:0] Cr + * or + * index 1: Cr plane, [7:0] Cr + * index 2: Cb plane, [7:0] Cb + */ +#define DRM_FORMAT_YUV410 fourcc_code('Y', 'U', 'V', '9') /* 4x4 subsampled Cb (1) and Cr (2) planes */ +#define DRM_FORMAT_YVU410 fourcc_code('Y', 'V', 'U', '9') /* 4x4 subsampled Cr (1) and Cb (2) planes */ +#define DRM_FORMAT_YUV411 fourcc_code('Y', 'U', '1', '1') /* 4x1 subsampled Cb (1) and Cr (2) planes */ +#define DRM_FORMAT_YVU411 fourcc_code('Y', 'V', '1', '1') /* 4x1 subsampled Cr (1) and Cb (2) planes */ +#define DRM_FORMAT_YUV420 fourcc_code('Y', 'U', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */ +#define DRM_FORMAT_YVU420 fourcc_code('Y', 'V', '1', '2') /* 2x2 subsampled Cr (1) and Cb (2) planes */ +#define DRM_FORMAT_YUV422 fourcc_code('Y', 'U', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes */ +#define DRM_FORMAT_YVU422 fourcc_code('Y', 'V', '1', '6') /* 2x1 subsampled Cr (1) and Cb (2) planes */ +#define DRM_FORMAT_YUV444 fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */ +#define DRM_FORMAT_YVU444 fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */ + +/* Compressed formats */ +#define DRM_FORMAT_MJPEG fourcc_code('M', 'J', 'P', 'G') /* Motion-JPEG */ + +/* + * Bayer formats + * + * Bayer formats contain green, red and blue components, with alternating lines + * of red and green, and blue and green pixels in different orders. For each + * block of 2x2 pixels there is one pixel with a red filter, two with a green + * filter, and one with a blue filter. The filters can be arranged in different + * patterns. + * + * For example, RGGB: + * row0: RGRGRGRG... + * row1: GBGBGBGB... + * row2: RGRGRGRG... + * row3: GBGBGBGB... + * ... + * + * Vendors have different methods to pack the sampling formats to increase data + * density. For this reason the fourcc only describes pixel sample size and the + * filter pattern for each block of 2x2 pixels. A modifier is needed to + * describe the memory layout. + * + * In addition to vendor modifiers for memory layout DRM_FORMAT_MOD_LINEAR may + * be used to describe a layout where all samples are placed consecutively in + * memory. If the sample does not fit inside a single byte, the sample storage + * is extended to the minimum number of (little endian) bytes that can hold the + * sample and any unused most-significant bits are defined as padding.
+ * + * For example, SRGGB10: + * Each 10-bit sample is contained in 2 consecutive little endian bytes, where + * the 6 most-significant bits are unused. + */ + +/* 8-bit Bayer formats */ +#define DRM_FORMAT_SRGGB8 fourcc_code('R', 'G', 'G', 'B') +#define DRM_FORMAT_SGRBG8 fourcc_code('G', 'R', 'B', 'G') +#define DRM_FORMAT_SGBRG8 fourcc_code('G', 'B', 'R', 'G') +#define DRM_FORMAT_SBGGR8 fourcc_code('B', 'A', '8', '1') + +/* 10-bit Bayer formats */ +#define DRM_FORMAT_SRGGB10 fourcc_code('R', 'G', '1', '0') +#define DRM_FORMAT_SGRBG10 fourcc_code('B', 'A', '1', '0') +#define DRM_FORMAT_SGBRG10 fourcc_code('G', 'B', '1', '0') +#define DRM_FORMAT_SBGGR10 fourcc_code('B', 'G', '1', '0') + +/* 12-bit Bayer formats */ +#define DRM_FORMAT_SRGGB12 fourcc_code('R', 'G', '1', '2') +#define DRM_FORMAT_SGRBG12 fourcc_code('B', 'A', '1', '2') +#define DRM_FORMAT_SGBRG12 fourcc_code('G', 'B', '1', '2') +#define DRM_FORMAT_SBGGR12 fourcc_code('B', 'G', '1', '2') + +/* 14-bit Bayer formats */ +#define DRM_FORMAT_SRGGB14 fourcc_code('R', 'G', '1', '4') +#define DRM_FORMAT_SGRBG14 fourcc_code('B', 'A', '1', '4') +#define DRM_FORMAT_SGBRG14 fourcc_code('G', 'B', '1', '4') +#define DRM_FORMAT_SBGGR14 fourcc_code('B', 'G', '1', '4') + +/* 16-bit Bayer formats */ +#define DRM_FORMAT_SRGGB16 fourcc_code('R', 'G', 'B', '6') +#define DRM_FORMAT_SGRBG16 fourcc_code('G', 'R', '1', '6') +#define DRM_FORMAT_SGBRG16 fourcc_code('G', 'B', '1', '6') +#define DRM_FORMAT_SBGGR16 fourcc_code('B', 'Y', 'R', '2') + +/* + * Format Modifiers: + * + * Format modifiers describe, typically, a re-ordering or modification + * of the data in a plane of an FB. This can be used to express tiled/ + * swizzled formats, or compression, or a combination of the two. + * + * The upper 8 bits of the format modifier are a vendor-id as assigned + * below. The lower 56 bits are assigned as vendor sees fit. + */ + +/* Vendor Ids: */ +#define DRM_FORMAT_MOD_VENDOR_NONE 0 +#define DRM_FORMAT_MOD_VENDOR_INTEL 0x01 +#define DRM_FORMAT_MOD_VENDOR_AMD 0x02 +#define DRM_FORMAT_MOD_VENDOR_NVIDIA 0x03 +#define DRM_FORMAT_MOD_VENDOR_SAMSUNG 0x04 +#define DRM_FORMAT_MOD_VENDOR_QCOM 0x05 +#define DRM_FORMAT_MOD_VENDOR_VIVANTE 0x06 +#define DRM_FORMAT_MOD_VENDOR_BROADCOM 0x07 +#define DRM_FORMAT_MOD_VENDOR_ARM 0x08 +#define DRM_FORMAT_MOD_VENDOR_ALLWINNER 0x09 +#define DRM_FORMAT_MOD_VENDOR_AMLOGIC 0x0a +#define DRM_FORMAT_MOD_VENDOR_MIPI 0x0b +#define DRM_FORMAT_MOD_VENDOR_RPI 0x0c + +/* add more to the end as needed */ + +#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1) + +#define fourcc_mod_get_vendor(modifier) \ + (((modifier) >> 56) & 0xff) + +#define fourcc_mod_is_vendor(modifier, vendor) \ + (fourcc_mod_get_vendor(modifier) == DRM_FORMAT_MOD_VENDOR_## vendor) + +#define fourcc_mod_code(vendor, val) \ + ((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | ((val) & 0x00ffffffffffffffULL)) + +/* + * Format Modifier tokens: + * + * When adding a new token please document the layout with a code comment, + * similar to the fourcc codes above. drm_fourcc.h is considered the + * authoritative source for all of these. + * + * Generic modifier names: + * + * DRM_FORMAT_MOD_GENERIC_* definitions are used to provide vendor-neutral names + * for layouts which are common across multiple vendors. To preserve + * compatibility, in cases where a vendor-specific definition already exists and + * a generic name for it is desired, the common name is a purely symbolic alias + * and must use the same numerical value as the original definition. 
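Since fourcc_code() and fourcc_mod_code() are plain bit packing, the layout they document can be checked mechanically. A small sketch, assuming libcamera's copy of this header (plus the drm.h it pulls in) is on the include path:

```cpp
// Sketch: verify the fourcc and modifier bit layout at compile time.
#include <cstdint>
#include <linux/drm_fourcc.h>

// fourcc_code() packs four ASCII bytes little endian.
static_assert(DRM_FORMAT_NV12 ==
	      ((uint32_t)'N' | ((uint32_t)'V' << 8) |
	       ((uint32_t)'1' << 16) | ((uint32_t)'2' << 24)),
	      "fourcc packing");

// fourcc_mod_code() puts the vendor ID in the top 8 bits...
static_assert(fourcc_mod_get_vendor(fourcc_mod_code(SAMSUNG, 1)) ==
	      DRM_FORMAT_MOD_VENDOR_SAMSUNG,
	      "vendor lives in bits 63:56");

// ...and fourcc_mod_is_vendor() token-pastes the vendor name.
static_assert(fourcc_mod_is_vendor(fourcc_mod_code(QCOM, 1), QCOM),
	      "vendor check");

int main() { return 0; }
```

This mirrors the contract stated above: the low 56 bits of a modifier are vendor-defined, so only the vendor field can be interpreted generically.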
+ * + * Note that generic names should only be used for modifiers which describe + * generic layouts (such as pixel re-ordering), which may have + * independently-developed support across multiple vendors. + * + * In future cases where a generic layout is identified before merging with a + * vendor-specific modifier, a new 'GENERIC' vendor or modifier using vendor + * 'NONE' could be considered. This should only be for obvious, exceptional + * cases to avoid polluting the 'GENERIC' namespace with modifiers which only + * apply to a single vendor. + * + * Generic names should not be used for cases where multiple hardware vendors + * have implementations of the same standardised compression scheme (such as + * AFBC). In those cases, all implementations should use the same format + * modifier(s), reflecting the vendor of the standard. + */ + +#define DRM_FORMAT_MOD_GENERIC_16_16_TILE DRM_FORMAT_MOD_SAMSUNG_16_16_TILE + +/* + * Invalid Modifier + * + * This modifier can be used as a sentinel to terminate the format modifiers + * list, or to initialize a variable with an invalid modifier. It might also be + * used to report an error back to userspace for certain APIs. + */ +#define DRM_FORMAT_MOD_INVALID fourcc_mod_code(NONE, DRM_FORMAT_RESERVED) + +/* + * Linear Layout + * + * Just plain linear layout. Note that this is different from not specifying any + * modifier (e.g. not setting DRM_MODE_FB_MODIFIERS in the DRM_ADDFB2 ioctl), + * which tells the driver to also take driver-internal information into account + * and so might actually result in a tiled framebuffer. + */ +#define DRM_FORMAT_MOD_LINEAR fourcc_mod_code(NONE, 0) + +/* + * Deprecated: use DRM_FORMAT_MOD_LINEAR instead + * + * The "none" format modifier doesn't actually mean that the modifier is + * implicit, instead it means that the layout is linear. Whether modifiers are + * used is out-of-band information carried in an API-specific way (e.g. in a + * flag for drm_mode_fb_cmd2). + */ +#define DRM_FORMAT_MOD_NONE 0 + +/* Intel framebuffer modifiers */ + +/* + * Intel X-tiling layout + * + * This is a tiled layout using 4Kb tiles (except on gen2 where the tiles are 2Kb) + * in row-major layout. Within the tile bytes are laid out row-major, with + * a platform-dependent stride. On top of that the memory can apply + * platform-dependent swizzling of some higher address bits into bit6. + * + * Note that this layout is only accurate on intel gen 8+ or valleyview chipsets. + * On earlier platforms the layout is highly platform-specific and not useful for + * cross-driver sharing. It exists since on a given platform it does uniquely + * identify the layout in a simple way for i915-specific userspace, which + * facilitated conversion of userspace to modifiers. Additionally the exact + * format on some really old platforms is not known. + */ +#define I915_FORMAT_MOD_X_TILED fourcc_mod_code(INTEL, 1) + +/* + * Intel Y-tiling layout + * + * This is a tiled layout using 4Kb tiles (except on gen2 where the tiles are 2Kb) + * in row-major layout. Within the tile bytes are laid out in OWORD (16 bytes) + * chunks column-major, with a platform-dependent height. On top of that the + * memory can apply platform-dependent swizzling of some higher address bits + * into bit6. + * + * Note that this layout is only accurate on intel gen 8+ or valleyview chipsets. + * On earlier platforms the layout is highly platform-specific and not useful for + * cross-driver sharing.
It exists since on a given platform it does uniquely + * identify the layout in a simple way for i915-specific userspace, which + * facilitated conversion of userspace to modifiers. Additionally the exact + * format on some really old platforms is not known. + */ +#define I915_FORMAT_MOD_Y_TILED fourcc_mod_code(INTEL, 2) + +/* + * Intel Yf-tiling layout + * + * This is a tiled layout using 4Kb tiles in row-major layout. + * Within the tile pixels are laid out in 16 256 byte units / sub-tiles which + * are arranged in four groups (two wide, two high) with column-major layout. + * Each group therefore consists out of four 256 byte units, which are also laid + * out as 2x2 column-major. + * 256 byte units are made out of four 64 byte blocks of pixels, producing + * either a square block or a 2:1 unit. + * 64 byte blocks of pixels contain four pixel rows of 16 bytes, where the width + * in pixel depends on the pixel depth. + */ +#define I915_FORMAT_MOD_Yf_TILED fourcc_mod_code(INTEL, 3) + +/* + * Intel color control surface (CCS) for render compression + * + * The framebuffer format must be one of the 8:8:8:8 RGB formats. + * The main surface will be plane index 0 and must be Y/Yf-tiled, + * the CCS will be plane index 1. + * + * Each CCS tile matches a 1024x512 pixel area of the main surface. + * To match certain aspects of the 3D hardware the CCS is + * considered to be made up of normal 128Bx32 Y tiles, Thus + * the CCS pitch must be specified in multiples of 128 bytes. + * + * In reality the CCS tile appears to be a 64Bx64 Y tile, composed + * of QWORD (8 bytes) chunks instead of OWORD (16 bytes) chunks. + * But that fact is not relevant unless the memory is accessed + * directly. + */ +#define I915_FORMAT_MOD_Y_TILED_CCS fourcc_mod_code(INTEL, 4) +#define I915_FORMAT_MOD_Yf_TILED_CCS fourcc_mod_code(INTEL, 5) + +/* + * Intel color control surfaces (CCS) for Gen-12 render compression. + * + * The main surface is Y-tiled and at plane index 0, the CCS is linear and + * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in + * main surface. In other words, 4 bits in CCS map to a main surface cache + * line pair. The main surface pitch is required to be a multiple of four + * Y-tile widths. + */ +#define I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS fourcc_mod_code(INTEL, 6) + +/* + * Intel color control surfaces (CCS) for Gen-12 media compression + * + * The main surface is Y-tiled and at plane index 0, the CCS is linear and + * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in + * main surface. In other words, 4 bits in CCS map to a main surface cache + * line pair. The main surface pitch is required to be a multiple of four + * Y-tile widths. For semi-planar formats like NV12, CCS planes follow the + * Y and UV planes i.e., planes 0 and 1 are used for Y and UV surfaces, + * planes 2 and 3 for the respective CCS. + */ +#define I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS fourcc_mod_code(INTEL, 7) + +/* + * Intel Color Control Surface with Clear Color (CCS) for Gen-12 render + * compression. + * + * The main surface is Y-tiled and is at plane index 0 whereas CCS is linear + * and at index 1. The clear color is stored at index 2, and the pitch should + * be 64 bytes aligned. The clear color structure is 256 bits. The first 128 bits + * represents Raw Clear Color Red, Green, Blue and Alpha color each represented + * by 32 bits. The raw clear color is consumed by the 3d engine and generates + * the converted clear color of size 64 bits. 
The first 32 bits store the Lower + * Converted Clear Color value and the next 32 bits store the Higher Converted + * Clear Color value when applicable. The Converted Clear Color values are + * consumed by the DE. The last 64 bits are used to store Color Discard Enable + * and Depth Clear Value Valid which are ignored by the DE. A CCS cache line + * corresponds to an area of 4x1 tiles in the main surface. The main surface + * pitch is required to be a multiple of 4 tile widths. + */ +#define I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC fourcc_mod_code(INTEL, 8) + +/* + * Intel Tile 4 layout + * + * This is a tiled layout using 4KB tiles in a row-major layout. It has the same + * shape as Tile Y at two granularities: 4KB (128B x 32) and 64B (16B x 4). It + * only differs from Tile Y at the 256B granularity in between. At this + * granularity, Tile Y has a shape of 16B x 32 rows, but this tiling has a shape + * of 64B x 8 rows. + */ +#define I915_FORMAT_MOD_4_TILED fourcc_mod_code(INTEL, 9) + +/* + * Intel color control surfaces (CCS) for DG2 render compression. + * + * The main surface is Tile 4 and at plane index 0. The CCS data is stored + * outside of the GEM object in a reserved memory area dedicated for the + * storage of the CCS data for all RC/RC_CC/MC compressible GEM objects. The + * main surface pitch is required to be a multiple of four Tile 4 widths. + */ +#define I915_FORMAT_MOD_4_TILED_DG2_RC_CCS fourcc_mod_code(INTEL, 10) + +/* + * Intel color control surfaces (CCS) for DG2 media compression. + * + * The main surface is Tile 4 and at plane index 0. For semi-planar formats + * like NV12, the Y and UV planes are Tile 4 and are located at plane indices + * 0 and 1, respectively. The CCS for all planes are stored outside of the + * GEM object in a reserved memory area dedicated for the storage of the + * CCS data for all RC/RC_CC/MC compressible GEM objects. The main surface + * pitch is required to be a multiple of four Tile 4 widths. + */ +#define I915_FORMAT_MOD_4_TILED_DG2_MC_CCS fourcc_mod_code(INTEL, 11) + +/* + * Intel Color Control Surface with Clear Color (CCS) for DG2 render compression. + * + * The main surface is Tile 4 and at plane index 0. The CCS data is stored + * outside of the GEM object in a reserved memory area dedicated for the + * storage of the CCS data for all RC/RC_CC/MC compressible GEM objects. The + * main surface pitch is required to be a multiple of four Tile 4 widths. The + * clear color is stored at plane index 1 and the pitch should be 64 bytes + * aligned. The format of the 256 bits of clear color data matches the one used + * for the I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC modifier, see its description + * for details. + */ +#define I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC fourcc_mod_code(INTEL, 12) + +/* + * Intel Color Control Surfaces (CCS) for display ver. 14 render compression. + * + * The main surface is tile4 and at plane index 0, the CCS is linear and + * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in + * main surface. In other words, 4 bits in CCS map to a main surface cache + * line pair. The main surface pitch is required to be a multiple of four + * tile4 widths. + */ +#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS fourcc_mod_code(INTEL, 13) + +/* + * Intel Color Control Surfaces (CCS) for display ver. 14 media compression + * + * The main surface is tile4 and at plane index 0, the CCS is linear and + * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in + * main surface. 
In other words, 4 bits in CCS map to a main surface cache + * line pair. The main surface pitch is required to be a multiple of four + * tile4 widths. For semi-planar formats like NV12, CCS planes follow the + * Y and UV planes i.e., planes 0 and 1 are used for Y and UV surfaces, + * planes 2 and 3 for the respective CCS. + */ +#define I915_FORMAT_MOD_4_TILED_MTL_MC_CCS fourcc_mod_code(INTEL, 14) + +/* + * Intel Color Control Surface with Clear Color (CCS) for display ver. 14 render + * compression. + * + * The main surface is tile4 and is at plane index 0 whereas CCS is linear + * and at index 1. The clear color is stored at index 2, and the pitch should + * be ignored. The clear color structure is 256 bits. The first 128 bits + * represents Raw Clear Color Red, Green, Blue and Alpha color each represented + * by 32 bits. The raw clear color is consumed by the 3d engine and generates + * the converted clear color of size 64 bits. The first 32 bits store the Lower + * Converted Clear Color value and the next 32 bits store the Higher Converted + * Clear Color value when applicable. The Converted Clear Color values are + * consumed by the DE. The last 64 bits are used to store Color Discard Enable + * and Depth Clear Value Valid which are ignored by the DE. A CCS cache line + * corresponds to an area of 4x1 tiles in the main surface. The main surface + * pitch is required to be a multiple of 4 tile widths. + */ +#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC fourcc_mod_code(INTEL, 15) + +/* + * IPU3 Bayer packing layout + * + * The IPU3 raw Bayer formats use a custom packing layout where there are no + * gaps between each 10-bit sample. It packs 25 pixels into 32 bytes leaving + * the 6 most significant bits in the last byte unused. The format is little + * endian. + */ +#define IPU3_FORMAT_MOD_PACKED fourcc_mod_code(INTEL, 13) + +/* + * Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks + * + * Macroblocks are laid in a Z-shape, and each pixel data is following the + * standard NV12 style. + * As for NV12, an image is the result of two frame buffers: one for Y, + * one for the interleaved Cb/Cr components (1/2 the height of the Y buffer). + * Alignment requirements are (for each buffer): + * - multiple of 128 pixels for the width + * - multiple of 32 pixels for the height + * + * For more information: see https://linuxtv.org/downloads/v4l-dvb-apis/re32.html + */ +#define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1) + +/* + * Tiled, 16 (pixels) x 16 (lines) - sized macroblocks + * + * This is a simple tiled layout using tiles of 16x16 pixels in a row-major + * layout. For YCbCr formats Cb/Cr components are taken in such a way that + * they correspond to their 16x16 luma block. + */ +#define DRM_FORMAT_MOD_SAMSUNG_16_16_TILE fourcc_mod_code(SAMSUNG, 2) + +/* + * Qualcomm Compressed Format + * + * Refers to a compressed variant of the base format that is compressed. + * Implementation may be platform and base-format specific. + * + * Each macrotile consists of m x n (mostly 4 x 4) tiles. + * Pixel data pitch/stride is aligned with macrotile width. + * Pixel data height is aligned with macrotile height. + * Entire pixel data buffer is aligned with 4k(bytes). + */ +#define DRM_FORMAT_MOD_QCOM_COMPRESSED fourcc_mod_code(QCOM, 1) + +/* + * Qualcomm Tiled Format + * + * Similar to DRM_FORMAT_MOD_QCOM_COMPRESSED but not compressed. + * Implementation may be platform and base-format specific. + * + * Each macrotile consists of m x n (mostly 4 x 4) tiles. 
+ * Pixel data pitch/stride is aligned with macrotile width. + * Pixel data height is aligned with macrotile height. + * Entire pixel data buffer is aligned with 4k(bytes). + */ +#define DRM_FORMAT_MOD_QCOM_TILED3 fourcc_mod_code(QCOM, 3) + +/* + * Qualcomm Alternate Tiled Format + * + * Alternate tiled format typically only used within GMEM. + * Implementation may be platform and base-format specific. + */ +#define DRM_FORMAT_MOD_QCOM_TILED2 fourcc_mod_code(QCOM, 2) + + +/* Vivante framebuffer modifiers */ + +/* + * Vivante 4x4 tiling layout + * + * This is a simple tiled layout using tiles of 4x4 pixels in a row-major + * layout. + */ +#define DRM_FORMAT_MOD_VIVANTE_TILED fourcc_mod_code(VIVANTE, 1) + +/* + * Vivante 64x64 super-tiling layout + * + * This is a tiled layout using 64x64 pixel super-tiles, where each super-tile + * contains 8x4 groups of 2x4 tiles of 4x4 pixels (like above) each, all in row- + * major layout. + * + * For more information: see + * https://github.com/etnaviv/etna_viv/blob/master/doc/hardware.md#texture-tiling + */ +#define DRM_FORMAT_MOD_VIVANTE_SUPER_TILED fourcc_mod_code(VIVANTE, 2) + +/* + * Vivante 4x4 tiling layout for dual-pipe + * + * Same as the 4x4 tiling layout, except every second 4x4 pixel tile starts at a + * different base address. Offsets from the base addresses are therefore halved + * compared to the non-split tiled layout. + */ +#define DRM_FORMAT_MOD_VIVANTE_SPLIT_TILED fourcc_mod_code(VIVANTE, 3) + +/* + * Vivante 64x64 super-tiling layout for dual-pipe + * + * Same as the 64x64 super-tiling layout, except every second 4x4 pixel tile + * starts at a different base address. Offsets from the base addresses are + * therefore halved compared to the non-split super-tiled layout. + */ +#define DRM_FORMAT_MOD_VIVANTE_SPLIT_SUPER_TILED fourcc_mod_code(VIVANTE, 4) + +/* + * Vivante TS (tile-status) buffer modifiers. They can be combined with all of + * the color buffer tiling modifiers defined above. When TS is present it's a + * separate buffer containing the clear/compression status of each tile. The + * modifiers are defined as VIVANTE_MOD_TS_c_s, where c is the color buffer + * tile size in bytes covered by one entry in the status buffer and s is the + * number of status bits per entry. + * We reserve the top 8 bits of the Vivante modifier space for tile status + * clear/compression modifiers, as future cores might add some more TS layout + * variations. + */ +#define VIVANTE_MOD_TS_64_4 (1ULL << 48) +#define VIVANTE_MOD_TS_64_2 (2ULL << 48) +#define VIVANTE_MOD_TS_128_4 (3ULL << 48) +#define VIVANTE_MOD_TS_256_4 (4ULL << 48) +#define VIVANTE_MOD_TS_MASK (0xfULL << 48) + +/* + * Vivante compression modifiers. Those depend on a TS modifier being present + * as the TS bits get reinterpreted as compression tags instead of simple + * clear markers when compression is enabled. + */ +#define VIVANTE_MOD_COMP_DEC400 (1ULL << 52) +#define VIVANTE_MOD_COMP_MASK (0xfULL << 52) + +/* Masking out the extension bits will yield the base modifier. */ +#define VIVANTE_MOD_EXT_MASK (VIVANTE_MOD_TS_MASK | \ + VIVANTE_MOD_COMP_MASK) + +/* NVIDIA frame buffer modifiers */ + +/* + * Tegra Tiled Layout, used by Tegra 2, 3 and 4. + * + * Pixels are arranged in simple tiles of 16 x 16 bytes. + */ +#define DRM_FORMAT_MOD_NVIDIA_TEGRA_TILED fourcc_mod_code(NVIDIA, 1) + +/* + * Generalized Block Linear layout, used by desktop GPUs starting with NV50/G80, + * and Tegra GPUs starting with Tegra K1. + * + * Pixels are arranged in Groups of Bytes (GOBs). 
GOB size and layout varies + * based on the architecture generation. GOBs themselves are then arranged in + * 3D blocks, with the block dimensions (in terms of GOBs) always being a power + * of two, and hence expressible as their log2 equivalent (E.g., "2" represents + * a block depth or height of "4"). + * + * Chapter 20 "Pixel Memory Formats" of the Tegra X1 TRM describes this format + * in full detail. + * + * Macro + * Bits Param Description + * ---- ----- ----------------------------------------------------------------- + * + * 3:0 h log2(height) of each block, in GOBs. Placed here for + * compatibility with the existing + * DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK()-based modifiers. + * + * 4:4 - Must be 1, to indicate block-linear layout. Necessary for + * compatibility with the existing + * DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK()-based modifiers. + * + * 8:5 - Reserved (To support 3D-surfaces with variable log2(depth) block + * size). Must be zero. + * + * Note there is no log2(width) parameter. Some portions of the + * hardware support a block width of two gobs, but it is impractical + * to use due to lack of support elsewhere, and has no known + * benefits. + * + * 11:9 - Reserved (To support 2D-array textures with variable array stride + * in blocks, specified via log2(tile width in blocks)). Must be + * zero. + * + * 19:12 k Page Kind. This value directly maps to a field in the page + * tables of all GPUs >= NV50. It affects the exact layout of bits + * in memory and can be derived from the tuple + * + * (format, GPU model, compression type, samples per pixel) + * + * Where compression type is defined below. If GPU model were + * implied by the format modifier, format, or memory buffer, page + * kind would not need to be included in the modifier itself, but + * since the modifier should define the layout of the associated + * memory buffer independent from any device or other context, it + * must be included here. + * + * 21:20 g GOB Height and Page Kind Generation. The height of a GOB changed + * starting with Fermi GPUs. Additionally, the mapping between page + * kind and bit layout has changed at various points. + * + * 0 = Gob Height 8, Fermi - Volta, Tegra K1+ Page Kind mapping + * 1 = Gob Height 4, G80 - GT2XX Page Kind mapping + * 2 = Gob Height 8, Turing+ Page Kind mapping + * 3 = Reserved for future use. + * + * 22:22 s Sector layout. On Tegra GPUs prior to Xavier, there is a further + * bit remapping step that occurs at an even lower level than the + * page kind and block linear swizzles. This causes the layout of + * surfaces mapped in those SOC's GPUs to be incompatible with the + * equivalent mapping on other GPUs in the same system. + * + * 0 = Tegra K1 - Tegra Parker/TX2 Layout. + * 1 = Desktop GPU and Tegra Xavier+ Layout + * + * 25:23 c Lossless Framebuffer Compression type. + * + * 0 = none + * 1 = ROP/3D, layout 1, exact compression format implied by Page + * Kind field + * 2 = ROP/3D, layout 2, exact compression format implied by Page + * Kind field + * 3 = CDE horizontal + * 4 = CDE vertical + * 5 = Reserved for future use + * 6 = Reserved for future use + * 7 = Reserved for future use + * + * 55:25 - Reserved for future use. Must be zero. 
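+ *
+ * As a purely illustrative example (the parameter values are hypothetical,
+ * not a recommendation), a Turing-class surface using the "generic" page
+ * kind 0xfe, the desktop sector layout, no compression and a block height
+ * of 16 GOBs (log2 = 4) would be described with the macro defined below as:
+ *
+ *   DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0xfe, 4)
+ *
+ * i.e. c = 0, s = 1, g = 2, k = 0xfe, h = 4.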
+ */ +#define DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(c, s, g, k, h) \ + fourcc_mod_code(NVIDIA, (0x10 | \ + ((h) & 0xf) | \ + (((k) & 0xff) << 12) | \ + (((g) & 0x3) << 20) | \ + (((s) & 0x1) << 22) | \ + (((c) & 0x7) << 23))) + +/* To grandfather in prior block linear format modifiers to the above layout, + * the page kind "0", which corresponds to "pitch/linear" and hence is unusable + * with block-linear layouts, is remapped within drivers to the value 0xfe, + * which corresponds to the "generic" kind used for simple single-sample + * uncompressed color formats on Fermi - Volta GPUs. + */ +static __inline__ __u64 +drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier) +{ + if (!(modifier & 0x10) || (modifier & (0xff << 12))) + return modifier; + else + return modifier | (0xfe << 12); +} + +/* + * 16Bx2 Block Linear layout, used by Tegra K1 and later + * + * Pixels are arranged in 64x8 Groups Of Bytes (GOBs). GOBs are then stacked + * vertically by a power of 2 (1 to 32 GOBs) to form a block. + * + * Within a GOB, data is ordered as 16B x 2 lines sectors laid in Z-shape. + * + * Parameter 'v' is the log2 encoding of the number of GOBs stacked vertically. + * Valid values are: + * + * 0 == ONE_GOB + * 1 == TWO_GOBS + * 2 == FOUR_GOBS + * 3 == EIGHT_GOBS + * 4 == SIXTEEN_GOBS + * 5 == THIRTYTWO_GOBS + * + * Chapter 20 "Pixel Memory Formats" of the Tegra X1 TRM describes this format + * in full detail. + */ +#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(v) \ + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 0, 0, 0, (v)) + +#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_ONE_GOB \ + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0) +#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_TWO_GOB \ + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1) +#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_FOUR_GOB \ + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2) +#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_EIGHT_GOB \ + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3) +#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_SIXTEEN_GOB \ + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4) +#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_THIRTYTWO_GOB \ + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5) + +/* + * Some Broadcom modifiers take parameters, for example the number of + * vertical lines in the image. Reserve the lower 32 bits for modifier + * type, and the next 24 bits for parameters. Top 8 bits are the + * vendor code. + */ +#define __fourcc_mod_broadcom_param_shift 8 +#define __fourcc_mod_broadcom_param_bits 48 +#define fourcc_mod_broadcom_code(val, params) \ + fourcc_mod_code(BROADCOM, ((((__u64)params) << __fourcc_mod_broadcom_param_shift) | val)) +#define fourcc_mod_broadcom_param(m) \ + ((int)(((m) >> __fourcc_mod_broadcom_param_shift) & \ + ((1ULL << __fourcc_mod_broadcom_param_bits) - 1))) +#define fourcc_mod_broadcom_mod(m) \ + ((m) & ~(((1ULL << __fourcc_mod_broadcom_param_bits) - 1) << \ + __fourcc_mod_broadcom_param_shift)) + +/* + * Broadcom VC4 "T" format + * + * This is the primary layout that the V3D GPU can texture from (it + * can't do linear). The T format has: + * + * - 64b utiles of pixels in a raster-order grid according to cpp. It's 4x4 + * pixels at 32 bit depth. + * + * - 1k subtiles made of a 4x4 raster-order grid of 64b utiles (so usually + * 16x16 pixels). + * + * - 4k tiles made of a 2x2 grid of 1k subtiles (so usually 32x32 pixels). On + * even 4k tile rows, they're arranged as (BL, TL, TR, BR), and on odd rows + * they're (TR, BR, BL, TL), where bottom left is start of memory. 
+ * + * - an image made of 4k tiles in rows either left-to-right (even rows of 4k + * tiles) or right-to-left (odd rows of 4k tiles). + */ +#define DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED fourcc_mod_code(BROADCOM, 1) + +/* + * Broadcom SAND format + * + * This is the native format that the H.264 codec block uses. For VC4 + * HVS, it is only valid for H.264 (NV12/21) and RGBA modes. + * + * The image can be considered to be split into columns, and the + * columns are placed consecutively into memory. The width of those + * columns can be either 32, 64, 128, or 256 pixels, but in practice + * only 128 pixel columns are used. + * + * The pitch between the start of each column is set to optimally + * switch between SDRAM banks. This is passed as the number of lines + * of column width in the modifier (we can't use the stride value due + * to various core checks that look at it , so you should set the + * stride to width*cpp). + * + * Note that the column height for this format modifier is the same + * for all of the planes, assuming that each column contains both Y + * and UV. Some SAND-using hardware stores UV in a separate tiled + * image from Y to reduce the column height, which is not supported + * with these modifiers. + * + * The DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT modifier is also + * supported for DRM_FORMAT_P030 where the columns remain as 128 bytes + * wide, but as this is a 10 bpp format that translates to 96 pixels. + */ + +#define DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(v) \ + fourcc_mod_broadcom_code(2, v) +#define DRM_FORMAT_MOD_BROADCOM_SAND64_COL_HEIGHT(v) \ + fourcc_mod_broadcom_code(3, v) +#define DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(v) \ + fourcc_mod_broadcom_code(4, v) +#define DRM_FORMAT_MOD_BROADCOM_SAND256_COL_HEIGHT(v) \ + fourcc_mod_broadcom_code(5, v) + +#define DRM_FORMAT_MOD_BROADCOM_SAND32 \ + DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(0) +#define DRM_FORMAT_MOD_BROADCOM_SAND64 \ + DRM_FORMAT_MOD_BROADCOM_SAND64_COL_HEIGHT(0) +#define DRM_FORMAT_MOD_BROADCOM_SAND128 \ + DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(0) +#define DRM_FORMAT_MOD_BROADCOM_SAND256 \ + DRM_FORMAT_MOD_BROADCOM_SAND256_COL_HEIGHT(0) + +/* Broadcom UIF format + * + * This is the common format for the current Broadcom multimedia + * blocks, including V3D 3.x and newer, newer video codecs, and + * displays. + * + * The image consists of utiles (64b blocks), UIF blocks (2x2 utiles), + * and macroblocks (4x4 UIF blocks). Those 4x4 UIF block groups are + * stored in columns, with padding between the columns to ensure that + * moving from one column to the next doesn't hit the same SDRAM page + * bank. + * + * To calculate the padding, it is assumed that each hardware block + * and the software driving it knows the platform's SDRAM page size, + * number of banks, and XOR address, and that it's identical between + * all blocks using the format. This tiling modifier will use XOR as + * necessary to reduce the padding. If a hardware block can't do XOR, + * the assumption is that a no-XOR tiling modifier will be created. + */ +#define DRM_FORMAT_MOD_BROADCOM_UIF fourcc_mod_code(BROADCOM, 6) + +/* + * Arm Framebuffer Compression (AFBC) modifiers + * + * AFBC is a proprietary lossless image compression protocol and format. + * It provides fine-grained random access and minimizes the amount of data + * transferred between IP blocks. + * + * AFBC has several features which may be supported and/or used, which are + * represented using bits in the modifier. 
Not all combinations are valid, + * and different devices or use-cases may support different combinations. + * + * Further information on the use of AFBC modifiers can be found in + * Documentation/gpu/afbc.rst + */ + +/* + * The top 4 bits (out of the 56 bits allotted for specifying vendor specific + * modifiers) denote the category for modifiers. Currently we have three + * categories of modifiers ie AFBC, MISC and AFRC. We can have a maximum of + * sixteen different categories. + */ +#define DRM_FORMAT_MOD_ARM_CODE(__type, __val) \ + fourcc_mod_code(ARM, ((__u64)(__type) << 52) | ((__val) & 0x000fffffffffffffULL)) + +#define DRM_FORMAT_MOD_ARM_TYPE_AFBC 0x00 +#define DRM_FORMAT_MOD_ARM_TYPE_MISC 0x01 + +#define DRM_FORMAT_MOD_ARM_AFBC(__afbc_mode) \ + DRM_FORMAT_MOD_ARM_CODE(DRM_FORMAT_MOD_ARM_TYPE_AFBC, __afbc_mode) + +/* + * AFBC superblock size + * + * Indicates the superblock size(s) used for the AFBC buffer. The buffer + * size (in pixels) must be aligned to a multiple of the superblock size. + * Four lowest significant bits(LSBs) are reserved for block size. + * + * Where one superblock size is specified, it applies to all planes of the + * buffer (e.g. 16x16, 32x8). When multiple superblock sizes are specified, + * the first applies to the Luma plane and the second applies to the Chroma + * plane(s). e.g. (32x8_64x4 means 32x8 Luma, with 64x4 Chroma). + * Multiple superblock sizes are only valid for multi-plane YCbCr formats. + */ +#define AFBC_FORMAT_MOD_BLOCK_SIZE_MASK 0xf +#define AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 (1ULL) +#define AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 (2ULL) +#define AFBC_FORMAT_MOD_BLOCK_SIZE_64x4 (3ULL) +#define AFBC_FORMAT_MOD_BLOCK_SIZE_32x8_64x4 (4ULL) + +/* + * AFBC lossless colorspace transform + * + * Indicates that the buffer makes use of the AFBC lossless colorspace + * transform. + */ +#define AFBC_FORMAT_MOD_YTR (1ULL << 4) + +/* + * AFBC block-split + * + * Indicates that the payload of each superblock is split. The second + * half of the payload is positioned at a predefined offset from the start + * of the superblock payload. + */ +#define AFBC_FORMAT_MOD_SPLIT (1ULL << 5) + +/* + * AFBC sparse layout + * + * This flag indicates that the payload of each superblock must be stored at a + * predefined position relative to the other superblocks in the same AFBC + * buffer. This order is the same order used by the header buffer. In this mode + * each superblock is given the same amount of space as an uncompressed + * superblock of the particular format would require, rounding up to the next + * multiple of 128 bytes in size. + */ +#define AFBC_FORMAT_MOD_SPARSE (1ULL << 6) + +/* + * AFBC copy-block restrict + * + * Buffers with this flag must obey the copy-block restriction. The restriction + * is such that there are no copy-blocks referring across the border of 8x8 + * blocks. For the subsampled data the 8x8 limitation is also subsampled. + */ +#define AFBC_FORMAT_MOD_CBR (1ULL << 7) + +/* + * AFBC tiled layout + * + * The tiled layout groups superblocks in 8x8 or 4x4 tiles, where all + * superblocks inside a tile are stored together in memory. 8x8 tiles are used + * for pixel formats up to and including 32 bpp while 4x4 tiles are used for + * larger bpp formats. The order between the tiles is scan line. + * When the tiled layout is used, the buffer size (in pixels) must be aligned + * to the tile size. 
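+ *
+ * As an illustrative example (a hypothetical configuration, not a
+ * recommendation), a buffer using 16x16 superblocks with the sparse and
+ * tiled layouts would combine the bits from this section as:
+ *
+ *   DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
+ *                           AFBC_FORMAT_MOD_SPARSE |
+ *                           AFBC_FORMAT_MOD_TILED)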
+ */ +#define AFBC_FORMAT_MOD_TILED (1ULL << 8) + +/* + * AFBC solid color blocks + * + * Indicates that the buffer makes use of solid-color blocks, whereby bandwidth + * can be reduced if a whole superblock is a single color. + */ +#define AFBC_FORMAT_MOD_SC (1ULL << 9) + +/* + * AFBC double-buffer + * + * Indicates that the buffer is allocated in a layout safe for front-buffer + * rendering. + */ +#define AFBC_FORMAT_MOD_DB (1ULL << 10) + +/* + * AFBC buffer content hints + * + * Indicates that the buffer includes per-superblock content hints. + */ +#define AFBC_FORMAT_MOD_BCH (1ULL << 11) + +/* AFBC uncompressed storage mode + * + * Indicates that the buffer is using AFBC uncompressed storage mode. + * In this mode all superblock payloads in the buffer use the uncompressed + * storage mode, which is usually only used for data which cannot be compressed. + * The buffer layout is the same as for AFBC buffers without USM set, this only + * affects the storage mode of the individual superblocks. Note that even a + * buffer without USM set may use uncompressed storage mode for some or all + * superblocks, USM just guarantees it for all. + */ +#define AFBC_FORMAT_MOD_USM (1ULL << 12) + +/* + * Arm Fixed-Rate Compression (AFRC) modifiers + * + * AFRC is a proprietary fixed rate image compression protocol and format, + * designed to provide guaranteed bandwidth and memory footprint + * reductions in graphics and media use-cases. + * + * AFRC buffers consist of one or more planes, with the same components + * and meaning as an uncompressed buffer using the same pixel format. + * + * Within each plane, the pixel/luma/chroma values are grouped into + * "coding unit" blocks which are individually compressed to a + * fixed size (in bytes). All coding units within a given plane of a buffer + * store the same number of values, and have the same compressed size. + * + * The coding unit size is configurable, allowing different rates of compression. + * + * The start of each AFRC buffer plane must be aligned to an alignment granule which + * depends on the coding unit size. + * + * Coding Unit Size Plane Alignment + * ---------------- --------------- + * 16 bytes 1024 bytes + * 24 bytes 512 bytes + * 32 bytes 2048 bytes + * + * Coding units are grouped into paging tiles. AFRC buffer dimensions must be aligned + * to a multiple of the paging tile dimensions. + * The dimensions of each paging tile depend on whether the buffer is optimised for + * scanline (SCAN layout) or rotated (ROT layout) access. + * + * Layout Paging Tile Width Paging Tile Height + * ------ ----------------- ------------------ + * SCAN 16 coding units 4 coding units + * ROT 8 coding units 8 coding units + * + * The dimensions of each coding unit depend on the number of components + * in the compressed plane and whether the buffer is optimised for + * scanline (SCAN layout) or rotated (ROT layout) access. 
+ * + * Number of Components in Plane Layout Coding Unit Width Coding Unit Height + * ----------------------------- --------- ----------------- ------------------ + * 1 SCAN 16 samples 4 samples + * Example: 16x4 luma samples in a 'Y' plane + * 16x4 chroma 'V' values, in the 'V' plane of a fully-planar YUV buffer + * ----------------------------- --------- ----------------- ------------------ + * 1 ROT 8 samples 8 samples + * Example: 8x8 luma samples in a 'Y' plane + * 8x8 chroma 'V' values, in the 'V' plane of a fully-planar YUV buffer + * ----------------------------- --------- ----------------- ------------------ + * 2 DONT CARE 8 samples 4 samples + * Example: 8x4 chroma pairs in the 'UV' plane of a semi-planar YUV buffer + * ----------------------------- --------- ----------------- ------------------ + * 3 DONT CARE 4 samples 4 samples + * Example: 4x4 pixels in an RGB buffer without alpha + * ----------------------------- --------- ----------------- ------------------ + * 4 DONT CARE 4 samples 4 samples + * Example: 4x4 pixels in an RGB buffer with alpha + */ + +#define DRM_FORMAT_MOD_ARM_TYPE_AFRC 0x02 + +#define DRM_FORMAT_MOD_ARM_AFRC(__afrc_mode) \ + DRM_FORMAT_MOD_ARM_CODE(DRM_FORMAT_MOD_ARM_TYPE_AFRC, __afrc_mode) + +/* + * AFRC coding unit size modifier. + * + * Indicates the number of bytes used to store each compressed coding unit for + * one or more planes in an AFRC encoded buffer. The coding unit size for chrominance + * is the same for both Cb and Cr, which may be stored in separate planes. + * + * AFRC_FORMAT_MOD_CU_SIZE_P0 indicates the number of bytes used to store + * each compressed coding unit in the first plane of the buffer. For RGBA buffers + * this is the only plane, while for semi-planar and fully-planar YUV buffers, + * this corresponds to the luma plane. + * + * AFRC_FORMAT_MOD_CU_SIZE_P12 indicates the number of bytes used to store + * each compressed coding unit in the second and third planes in the buffer. + * For semi-planar and fully-planar YUV buffers, this corresponds to the chroma plane(s). + * + * For single-plane buffers, AFRC_FORMAT_MOD_CU_SIZE_P0 must be specified + * and AFRC_FORMAT_MOD_CU_SIZE_P12 must be zero. + * For semi-planar and fully-planar buffers, both AFRC_FORMAT_MOD_CU_SIZE_P0 and + * AFRC_FORMAT_MOD_CU_SIZE_P12 must be specified. + */ +#define AFRC_FORMAT_MOD_CU_SIZE_MASK 0xf +#define AFRC_FORMAT_MOD_CU_SIZE_16 (1ULL) +#define AFRC_FORMAT_MOD_CU_SIZE_24 (2ULL) +#define AFRC_FORMAT_MOD_CU_SIZE_32 (3ULL) + +#define AFRC_FORMAT_MOD_CU_SIZE_P0(__afrc_cu_size) (__afrc_cu_size) +#define AFRC_FORMAT_MOD_CU_SIZE_P12(__afrc_cu_size) ((__afrc_cu_size) << 4) + +/* + * AFRC scanline memory layout. + * + * Indicates if the buffer uses the scanline-optimised layout + * for an AFRC encoded buffer, otherwise, it uses the rotation-optimised layout. + * The memory layout is the same for all planes. + */ +#define AFRC_FORMAT_MOD_LAYOUT_SCAN (1ULL << 8) + +/* + * Arm 16x16 Block U-Interleaved modifier + * + * This is used by Arm Mali Utgard and Midgard GPUs. It divides the image + * into 16x16 pixel blocks. Blocks are stored linearly in order, but pixels + * in the block are reordered. + */ +#define DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED \ + DRM_FORMAT_MOD_ARM_CODE(DRM_FORMAT_MOD_ARM_TYPE_MISC, 1ULL) + +/* + * Allwinner tiled modifier + * + * This tiling mode is implemented by the VPU found on all Allwinner platforms, + * codenamed sunxi. It is associated with a YUV format that uses either 2 or 3 + * planes. 
+ * + * With this tiling, the luminance samples are disposed in tiles representing + * 32x32 pixels and the chrominance samples in tiles representing 32x64 pixels. + * The pixel order in each tile is linear and the tiles are disposed linearly, + * both in row-major order. + */ +#define DRM_FORMAT_MOD_ALLWINNER_TILED fourcc_mod_code(ALLWINNER, 1) + +/* + * Amlogic Video Framebuffer Compression modifiers + * + * Amlogic uses a proprietary lossless image compression protocol and format + * for their hardware video codec accelerators, either video decoders or + * video input encoders. + * + * It considerably reduces memory bandwidth while writing and reading + * frames in memory. + * + * The underlying storage is considered to be 3 components, 8bit or 10-bit + * per component YCbCr 420, single plane : + * - DRM_FORMAT_YUV420_8BIT + * - DRM_FORMAT_YUV420_10BIT + * + * The first 8 bits of the mode defines the layout, then the following 8 bits + * defines the options changing the layout. + * + * Not all combinations are valid, and different SoCs may support different + * combinations of layout and options. + */ +#define __fourcc_mod_amlogic_layout_mask 0xff +#define __fourcc_mod_amlogic_options_shift 8 +#define __fourcc_mod_amlogic_options_mask 0xff + +#define DRM_FORMAT_MOD_AMLOGIC_FBC(__layout, __options) \ + fourcc_mod_code(AMLOGIC, \ + ((__layout) & __fourcc_mod_amlogic_layout_mask) | \ + (((__options) & __fourcc_mod_amlogic_options_mask) \ + << __fourcc_mod_amlogic_options_shift)) + +/* Amlogic FBC Layouts */ + +/* + * Amlogic FBC Basic Layout + * + * The basic layout is composed of: + * - a body content organized in 64x32 superblocks with 4096 bytes per + * superblock in default mode. + * - a 32 bytes per 128x64 header block + * + * This layout is transferrable between Amlogic SoCs supporting this modifier. + */ +#define AMLOGIC_FBC_LAYOUT_BASIC (1ULL) + +/* + * Amlogic FBC Scatter Memory layout + * + * Indicates the header contains IOMMU references to the compressed + * frames content to optimize memory access and layout. + * + * In this mode, only the header memory address is needed, thus the + * content memory organization is tied to the current producer + * execution and cannot be saved/dumped neither transferrable between + * Amlogic SoCs supporting this modifier. + * + * Due to the nature of the layout, these buffers are not expected to + * be accessible by the user-space clients, but only accessible by the + * hardware producers and consumers. + * + * The user-space clients should expect a failure while trying to mmap + * the DMA-BUF handle returned by the producer. + */ +#define AMLOGIC_FBC_LAYOUT_SCATTER (2ULL) + +/* Amlogic FBC Layout Options Bit Mask */ + +/* + * Amlogic FBC Memory Saving mode + * + * Indicates the storage is packed when pixel size is multiple of word + * boundaries, i.e. 8bit should be stored in this mode to save allocation + * memory. + * + * This mode reduces body layout to 3072 bytes per 64x32 superblock with + * the basic layout and 3200 bytes per 64x32 superblock combined with + * the scatter layout. 
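+ *
+ * As an illustrative example, an 8-bit buffer stored with the basic layout
+ * and packed in memory saving mode would be described, using the macros
+ * from this section, as:
+ *
+ *   DRM_FORMAT_MOD_AMLOGIC_FBC(AMLOGIC_FBC_LAYOUT_BASIC,
+ *                              AMLOGIC_FBC_OPTION_MEM_SAVING)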
+ */ +#define AMLOGIC_FBC_OPTION_MEM_SAVING (1ULL << 0) + +/* + * AMD modifiers + * + * Memory layout: + * + * without DCC: + * - main surface + * + * with DCC & without DCC_RETILE: + * - main surface in plane 0 + * - DCC surface in plane 1 (RB-aligned, pipe-aligned if DCC_PIPE_ALIGN is set) + * + * with DCC & DCC_RETILE: + * - main surface in plane 0 + * - displayable DCC surface in plane 1 (not RB-aligned & not pipe-aligned) + * - pipe-aligned DCC surface in plane 2 (RB-aligned & pipe-aligned) + * + * For multi-plane formats the above surfaces get merged into one plane for + * each format plane, based on the required alignment only. + * + * Bits Parameter Notes + * ----- ------------------------ --------------------------------------------- + * + * 7:0 TILE_VERSION Values are AMD_FMT_MOD_TILE_VER_* + * 12:8 TILE Values are AMD_FMT_MOD_TILE__* + * 13 DCC + * 14 DCC_RETILE + * 15 DCC_PIPE_ALIGN + * 16 DCC_INDEPENDENT_64B + * 17 DCC_INDEPENDENT_128B + * 19:18 DCC_MAX_COMPRESSED_BLOCK Values are AMD_FMT_MOD_DCC_BLOCK_* + * 20 DCC_CONSTANT_ENCODE + * 23:21 PIPE_XOR_BITS Only for some chips + * 26:24 BANK_XOR_BITS Only for some chips + * 29:27 PACKERS Only for some chips + * 32:30 RB Only for some chips + * 35:33 PIPE Only for some chips + * 55:36 - Reserved for future use, must be zero + */ +#define AMD_FMT_MOD fourcc_mod_code(AMD, 0) + +#define IS_AMD_FMT_MOD(val) (((val) >> 56) == DRM_FORMAT_MOD_VENDOR_AMD) + +/* Reserve 0 for GFX8 and older */ +#define AMD_FMT_MOD_TILE_VER_GFX9 1 +#define AMD_FMT_MOD_TILE_VER_GFX10 2 +#define AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS 3 +#define AMD_FMT_MOD_TILE_VER_GFX11 4 + +/* + * 64K_S is the same for GFX9/GFX10/GFX10_RBPLUS and hence has GFX9 as canonical + * version. + */ +#define AMD_FMT_MOD_TILE_GFX9_64K_S 9 + +/* + * 64K_D for non-32 bpp is the same for GFX9/GFX10/GFX10_RBPLUS and hence has + * GFX9 as canonical version. + */ +#define AMD_FMT_MOD_TILE_GFX9_64K_D 10 +#define AMD_FMT_MOD_TILE_GFX9_64K_S_X 25 +#define AMD_FMT_MOD_TILE_GFX9_64K_D_X 26 +#define AMD_FMT_MOD_TILE_GFX9_64K_R_X 27 +#define AMD_FMT_MOD_TILE_GFX11_256K_R_X 31 + +#define AMD_FMT_MOD_DCC_BLOCK_64B 0 +#define AMD_FMT_MOD_DCC_BLOCK_128B 1 +#define AMD_FMT_MOD_DCC_BLOCK_256B 2 + +#define AMD_FMT_MOD_TILE_VERSION_SHIFT 0 +#define AMD_FMT_MOD_TILE_VERSION_MASK 0xFF +#define AMD_FMT_MOD_TILE_SHIFT 8 +#define AMD_FMT_MOD_TILE_MASK 0x1F + +/* Whether DCC compression is enabled. */ +#define AMD_FMT_MOD_DCC_SHIFT 13 +#define AMD_FMT_MOD_DCC_MASK 0x1 + +/* + * Whether to include two DCC surfaces, one which is rb & pipe aligned, and + * one which is not-aligned. + */ +#define AMD_FMT_MOD_DCC_RETILE_SHIFT 14 +#define AMD_FMT_MOD_DCC_RETILE_MASK 0x1 + +/* Only set if DCC_RETILE = false */ +#define AMD_FMT_MOD_DCC_PIPE_ALIGN_SHIFT 15 +#define AMD_FMT_MOD_DCC_PIPE_ALIGN_MASK 0x1 + +#define AMD_FMT_MOD_DCC_INDEPENDENT_64B_SHIFT 16 +#define AMD_FMT_MOD_DCC_INDEPENDENT_64B_MASK 0x1 +#define AMD_FMT_MOD_DCC_INDEPENDENT_128B_SHIFT 17 +#define AMD_FMT_MOD_DCC_INDEPENDENT_128B_MASK 0x1 +#define AMD_FMT_MOD_DCC_MAX_COMPRESSED_BLOCK_SHIFT 18 +#define AMD_FMT_MOD_DCC_MAX_COMPRESSED_BLOCK_MASK 0x3 + +/* + * DCC supports embedding some clear colors directly in the DCC surface. + * However, on older GPUs the rendering HW ignores the embedded clear color + * and prefers the driver provided color. This necessitates doing a fastclear + * eliminate operation before a process transfers control. + * + * If this bit is set that means the fastclear eliminate is not needed for these + * embeddable colors. 
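+ *
+ * For illustration only (the field values below are hypothetical and
+ * chip-specific), a GFX9 64K_S_X surface with DCC and constant encoding
+ * could be composed with the AMD_FMT_MOD_SET() helper defined below:
+ *
+ *   AMD_FMT_MOD |
+ *   AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
+ *   AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
+ *   AMD_FMT_MOD_SET(DCC, 1) |
+ *   AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
+ *   AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
+ *   AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
+ *   AMD_FMT_MOD_SET(PIPE_XOR_BITS, 2)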
+ */
+#define AMD_FMT_MOD_DCC_CONSTANT_ENCODE_SHIFT 20
+#define AMD_FMT_MOD_DCC_CONSTANT_ENCODE_MASK 0x1
+
+/*
+ * The below fields are for accounting for per GPU differences. These are only
+ * relevant for GFX9 and later and if the tile field is *_X/_T.
+ *
+ * PIPE_XOR_BITS = always needed
+ * BANK_XOR_BITS = only for TILE_VER_GFX9
+ * PACKERS = only for TILE_VER_GFX10_RBPLUS
+ * RB = only for TILE_VER_GFX9 & DCC
+ * PIPE = only for TILE_VER_GFX9 & DCC & (DCC_RETILE | DCC_PIPE_ALIGN)
+ */
+#define AMD_FMT_MOD_PIPE_XOR_BITS_SHIFT 21
+#define AMD_FMT_MOD_PIPE_XOR_BITS_MASK 0x7
+#define AMD_FMT_MOD_BANK_XOR_BITS_SHIFT 24
+#define AMD_FMT_MOD_BANK_XOR_BITS_MASK 0x7
+#define AMD_FMT_MOD_PACKERS_SHIFT 27
+#define AMD_FMT_MOD_PACKERS_MASK 0x7
+#define AMD_FMT_MOD_RB_SHIFT 30
+#define AMD_FMT_MOD_RB_MASK 0x7
+#define AMD_FMT_MOD_PIPE_SHIFT 33
+#define AMD_FMT_MOD_PIPE_MASK 0x7
+
+#define AMD_FMT_MOD_SET(field, value) \
+	((__u64)(value) << AMD_FMT_MOD_##field##_SHIFT)
+#define AMD_FMT_MOD_GET(field, value) \
+	(((value) >> AMD_FMT_MOD_##field##_SHIFT) & AMD_FMT_MOD_##field##_MASK)
+#define AMD_FMT_MOD_CLEAR(field) \
+	(~((__u64)AMD_FMT_MOD_##field##_MASK << AMD_FMT_MOD_##field##_SHIFT))
+
+/* Mobile Industry Processor Interface (MIPI) modifiers */
+
+/*
+ * MIPI CSI-2 packing layout
+ *
+ * The CSI-2 RAW formats (for example Bayer) use a different packing layout
+ * depending on the sample size.
+ *
+ * - 10-bits per sample
+ * Every four consecutive samples are packed into 5 bytes. Each of the first 4
+ * bytes contains the 8 high order bits of the pixels, and the 5th byte
+ * contains the 2 least-significant bits of each pixel, in the same order.
+ *
+ * - 12-bits per sample
+ * Every two consecutive samples are packed into three bytes. Each of the
+ * first two bytes contains the 8 high order bits of the pixels, and the third
+ * byte contains the four least-significant bits of each pixel, in the same
+ * order.
+ *
+ * - 14-bits per sample
+ * Every four consecutive samples are packed into seven bytes. Each of the
+ * first four bytes contains the eight high order bits of the pixels, and the
+ * three following bytes contain the six least-significant bits of each
+ * pixel, in the same order.
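+ *
+ * As an illustrative sketch (not part of this header), the four 10-bit
+ * samples of one 5-byte group could be unpacked as follows, where b points
+ * to the group and s receives the samples:
+ *
+ *   s[0] = (b[0] << 2) | ((b[4] >> 0) & 0x3);
+ *   s[1] = (b[1] << 2) | ((b[4] >> 2) & 0x3);
+ *   s[2] = (b[2] << 2) | ((b[4] >> 4) & 0x3);
+ *   s[3] = (b[3] << 2) | ((b[4] >> 6) & 0x3);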
+ */
+#define MIPI_FORMAT_MOD_CSI2_PACKED fourcc_mod_code(MIPI, 1)
+
+#define PISP_FORMAT_MOD_COMPRESS_MODE1 fourcc_mod_code(RPI, 1)
+#define PISP_FORMAT_MOD_COMPRESS_MODE2 fourcc_mod_code(RPI, 2)
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* DRM_FOURCC_H */
diff --git a/spider-cam/libcamera/include/linux/intel-ipu3.h b/spider-cam/libcamera/include/linux/intel-ipu3.h
new file mode 100644
index 0000000..8c192f3
--- /dev/null
+++ b/spider-cam/libcamera/include/linux/intel-ipu3.h
@@ -0,0 +1,2819 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Copyright (C) 2017 - 2018 Intel Corporation */
+
+#ifndef __IPU3_UAPI_H
+#define __IPU3_UAPI_H
+
+#include <linux/types.h>
+
+/* from /drivers/staging/media/ipu3/include/videodev2.h */
+
+/* Vendor specific - used for IPU3 camera sub-system */
+/* IPU3 processing parameters */
+#define V4L2_META_FMT_IPU3_PARAMS v4l2_fourcc('i', 'p', '3', 'p')
+/* IPU3 3A statistics */
+#define V4L2_META_FMT_IPU3_STAT_3A v4l2_fourcc('i', 'p', '3', 's')
+
+/* from include/uapi/linux/v4l2-controls.h */
+#define V4L2_CID_INTEL_IPU3_BASE (V4L2_CID_USER_BASE + 0x10c0)
+#define V4L2_CID_INTEL_IPU3_MODE (V4L2_CID_INTEL_IPU3_BASE + 1)
+
+/******************* ipu3_uapi_stats_3a *******************/
+
+#define IPU3_UAPI_MAX_STRIPES 2
+#define IPU3_UAPI_MAX_BUBBLE_SIZE 10
+
+#define IPU3_UAPI_GRID_START_MASK ((1 << 12) - 1)
+#define IPU3_UAPI_GRID_Y_START_EN (1 << 15)
+
+/* controls generation of meta_data (like FF enable/disable) */
+#define IPU3_UAPI_AWB_RGBS_THR_B_EN (1 << 14)
+#define IPU3_UAPI_AWB_RGBS_THR_B_INCL_SAT (1 << 15)
+
+/**
+ * struct ipu3_uapi_grid_config - Grid plane config
+ *
+ * @width: Grid horizontal dimensions, in number of grid blocks (cells).
+ * For AWB, the range is (16, 80).
+ * For AF/AE, the range is (16, 32).
+ * @height: Grid vertical dimensions, in number of grid cells.
+ * For AWB, the range is (16, 60).
+ * For AF/AE, the range is (16, 24).
+ * @block_width_log2: Log2 of the width of each cell in pixels.
+ * For AWB, the range is [3, 6].
+ * For AF/AE, the range is [3, 7].
+ * @block_height_log2: Log2 of the height of each cell in pixels.
+ * For AWB, the range is [3, 6].
+ * For AF/AE, the range is [3, 7].
+ * @height_per_slice: The number of blocks in vertical axis per slice.
+ * Default 2.
+ * @x_start: X value of top left corner of Region of Interest (ROI).
+ * @y_start: Y value of top left corner of ROI
+ * @x_end: X value of bottom right corner of ROI
+ * @y_end: Y value of bottom right corner of ROI
+ *
+ * Due to the total amount of collected data, most statistics
+ * create a grid-based output, and the data is then divided into "slices".
+ */
+struct ipu3_uapi_grid_config {
+	__u8 width;
+	__u8 height;
+	__u16 block_width_log2:3;
+	__u16 block_height_log2:3;
+	__u16 height_per_slice:8;
+	__u16 x_start;
+	__u16 y_start;
+	__u16 x_end;
+	__u16 y_end;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_awb_set_item - Memory layout for each cell in AWB
+ *
+ * @Gr_avg: Green average for red lines in the cell.
+ * @R_avg: Red average in the cell.
+ * @B_avg: Blue average in the cell.
+ * @Gb_avg: Green average for blue lines in the cell.
+ * @sat_ratio: Percentage of pixels over the thresholds specified in
+ * ipu3_uapi_awb_config_s, coded from 0 to 255.
+ * @padding0: Unused byte for padding.
+ * @padding1: Unused byte for padding.
+ * @padding2: Unused byte for padding.
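+ *
+ * As an illustrative sketch (not part of the UAPI), a simple grey-world AWB
+ * could average these per-cell values over the whole grid and derive channel
+ * gains, e.g. with sum_gr/sum_r/sum_b/sum_gb accumulated from n_cells cells:
+ *
+ *   g_avg = (sum_gr + sum_gb) / (2 * n_cells);
+ *   gain_r = g_avg * n_cells / sum_r;
+ *   gain_b = g_avg * n_cells / sum_b;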
+ */
+struct ipu3_uapi_awb_set_item {
+	__u8 Gr_avg;
+	__u8 R_avg;
+	__u8 B_avg;
+	__u8 Gb_avg;
+	__u8 sat_ratio;
+	__u8 padding0;
+	__u8 padding1;
+	__u8 padding2;
+} __attribute__((packed));
+
+/*
+ * The grid based data is divided into "slices" called set, each slice of setX
+ * refers to ipu3_uapi_grid_config width * height_per_slice.
+ */
+#define IPU3_UAPI_AWB_MAX_SETS 60
+/* Based on grid size 80 * 60 and cell size 16 x 16 */
+#define IPU3_UAPI_AWB_SET_SIZE 160
+#define IPU3_UAPI_AWB_SPARE_FOR_BUBBLES \
+	(IPU3_UAPI_MAX_BUBBLE_SIZE * IPU3_UAPI_MAX_STRIPES)
+#define IPU3_UAPI_AWB_MAX_BUFFER_SIZE \
+	(IPU3_UAPI_AWB_MAX_SETS * \
+	 (IPU3_UAPI_AWB_SET_SIZE + IPU3_UAPI_AWB_SPARE_FOR_BUBBLES))
+
+/**
+ * struct ipu3_uapi_awb_raw_buffer - AWB raw buffer
+ *
+ * @meta_data: buffer to hold auto white balance meta data which is
+ * the average values for each color channel.
+ */
+struct ipu3_uapi_awb_raw_buffer {
+	struct ipu3_uapi_awb_set_item meta_data[IPU3_UAPI_AWB_MAX_BUFFER_SIZE]
+		__attribute__((aligned(32)));
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_awb_config_s - AWB config
+ *
+ * @rgbs_thr_gr: gr threshold value.
+ * @rgbs_thr_r: Red threshold value.
+ * @rgbs_thr_gb: gb threshold value.
+ * @rgbs_thr_b: Blue threshold value.
+ * @grid: &ipu3_uapi_grid_config, the default grid resolution is 16x16 cells.
+ *
+ * The threshold is a saturation measure, range [0, 8191]; 8191 is the default.
+ * Values over the threshold may be optionally rejected for averaging.
+ */
+struct ipu3_uapi_awb_config_s {
+	__u16 rgbs_thr_gr;
+	__u16 rgbs_thr_r;
+	__u16 rgbs_thr_gb;
+	__u16 rgbs_thr_b;
+	struct ipu3_uapi_grid_config grid;
+} __attribute__((aligned(32))) __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_awb_config - AWB config wrapper
+ *
+ * @config: config for auto white balance as defined by &ipu3_uapi_awb_config_s
+ */
+struct ipu3_uapi_awb_config {
+	struct ipu3_uapi_awb_config_s config __attribute__((aligned(32)));
+} __attribute__((packed));
+
+#define IPU3_UAPI_AE_COLORS 4 /* R, G, B, Y */
+#define IPU3_UAPI_AE_BINS 256
+#define IPU3_UAPI_AE_WEIGHTS 96
+
+/**
+ * struct ipu3_uapi_ae_raw_buffer - AE global weighted histogram
+ *
+ * @vals: Sum of IPU3_UAPI_AE_COLORS in cell
+ *
+ * Each histogram contains IPU3_UAPI_AE_BINS bins. Each bin is a 24-bit
+ * unsigned counter of the number of pixels.
+ */
+struct ipu3_uapi_ae_raw_buffer {
+	__u32 vals[IPU3_UAPI_AE_BINS * IPU3_UAPI_AE_COLORS];
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_ae_raw_buffer_aligned - AE raw buffer
+ *
+ * @buff: &ipu3_uapi_ae_raw_buffer to hold full frame meta data.
+ */
+struct ipu3_uapi_ae_raw_buffer_aligned {
+	struct ipu3_uapi_ae_raw_buffer buff __attribute__((aligned(32)));
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_ae_grid_config - AE weight grid
+ *
+ * @width: Grid horizontal dimensions. Value: [16, 32], default 16.
+ * @height: Grid vertical dimensions. Value: [16, 24], default 16.
+ * @block_width_log2: Log2 of the width of the grid cell, value: [3, 7].
+ * @block_height_log2: Log2 of the height of the grid cell, value: [3, 7].
+ * default is 3 (cell size 8x8), 4 cells per grid.
+ * @reserved0: reserved
+ * @ae_en: 0: does not write to &ipu3_uapi_ae_raw_buffer_aligned array,
+ * 1: write normally.
+ * @rst_hist_array: write 1 to trigger histogram array reset.
+ * @done_rst_hist_array: flag for histogram array reset done.
+ * @x_start: X value of top left corner of ROI, default 0.
+ * @y_start: Y value of top left corner of ROI, default 0.
+ * @x_end: X value of bottom right corner of ROI + * @y_end: Y value of bottom right corner of ROI + * + * The AE block accumulates 4 global weighted histograms(R, G, B, Y) over + * a defined ROI within the frame. The contribution of each pixel into the + * histogram, defined by &ipu3_uapi_ae_weight_elem LUT, is indexed by a grid. + */ +struct ipu3_uapi_ae_grid_config { + __u8 width; + __u8 height; + __u8 block_width_log2:4; + __u8 block_height_log2:4; + __u8 reserved0:5; + __u8 ae_en:1; + __u8 rst_hist_array:1; + __u8 done_rst_hist_array:1; + __u16 x_start; + __u16 y_start; + __u16 x_end; + __u16 y_end; +} __attribute__((packed)); + +/** + * struct ipu3_uapi_ae_weight_elem - AE weights LUT + * + * @cell0: weighted histogram grid value. + * @cell1: weighted histogram grid value. + * @cell2: weighted histogram grid value. + * @cell3: weighted histogram grid value. + * @cell4: weighted histogram grid value. + * @cell5: weighted histogram grid value. + * @cell6: weighted histogram grid value. + * @cell7: weighted histogram grid value. + * + * Use weighted grid value to give a different contribution factor to each cell. + * Precision u4, range [0, 15]. + */ +struct ipu3_uapi_ae_weight_elem { + __u32 cell0:4; + __u32 cell1:4; + __u32 cell2:4; + __u32 cell3:4; + __u32 cell4:4; + __u32 cell5:4; + __u32 cell6:4; + __u32 cell7:4; +} __attribute__((packed)); + +/** + * struct ipu3_uapi_ae_ccm - AE coefficients for WB and CCM + * + * @gain_gr: WB gain factor for the gr channels. Default 256. + * @gain_r: WB gain factor for the r channel. Default 256. + * @gain_b: WB gain factor for the b channel. Default 256. + * @gain_gb: WB gain factor for the gb channels. Default 256. + * @mat: 4x4 matrix that transforms Bayer quad output from WB to RGB+Y. + * + * Default: + * 128, 0, 0, 0, + * 0, 128, 0, 0, + * 0, 0, 128, 0, + * 0, 0, 0, 128, + * + * As part of the raw frame pre-process stage, the WB and color conversion need + * to be applied to expose the impact of these gain operations. + */ +struct ipu3_uapi_ae_ccm { + __u16 gain_gr; + __u16 gain_r; + __u16 gain_b; + __u16 gain_gb; + __s16 mat[16]; +} __attribute__((packed)); + +/** + * struct ipu3_uapi_ae_config - AE config + * + * @grid_cfg: config for auto exposure statistics grid. See struct + * &ipu3_uapi_ae_grid_config, as Imgu did not support output + * auto exposure statistics, so user can ignore this configuration + * and use the RGB table in auto-whitebalance statistics instead. + * @weights: &IPU3_UAPI_AE_WEIGHTS is based on 32x24 blocks in the grid. + * Each grid cell has a corresponding value in weights LUT called + * grid value, global histogram is updated based on grid value and + * pixel value. + * @ae_ccm: Color convert matrix pre-processing block. + * + * Calculate AE grid from image resolution, resample ae weights. + */ +struct ipu3_uapi_ae_config { + struct ipu3_uapi_ae_grid_config grid_cfg __attribute__((aligned(32))); + struct ipu3_uapi_ae_weight_elem weights[IPU3_UAPI_AE_WEIGHTS] + __attribute__((aligned(32))); + struct ipu3_uapi_ae_ccm ae_ccm __attribute__((aligned(32))); +} __attribute__((packed)); + +/** + * struct ipu3_uapi_af_filter_config - AF 2D filter for contrast measurements + * + * @y1_coeff_0: filter Y1, structure: 3x11, support both symmetry and + * anti-symmetry type. A12 is center, A1-A11 are neighbours. + * for analyzing low frequency content, used to calculate sum + * of gradients in x direction. + * @y1_coeff_0.a1: filter1 coefficients A1, u8, default 0. + * @y1_coeff_0.a2: filter1 coefficients A2, u8, default 0. 
+ * @y1_coeff_0.a3: filter1 coefficients A3, u8, default 0. + * @y1_coeff_0.a4: filter1 coefficients A4, u8, default 0. + * @y1_coeff_1: Struct + * @y1_coeff_1.a5: filter1 coefficients A5, u8, default 0. + * @y1_coeff_1.a6: filter1 coefficients A6, u8, default 0. + * @y1_coeff_1.a7: filter1 coefficients A7, u8, default 0. + * @y1_coeff_1.a8: filter1 coefficients A8, u8, default 0. + * @y1_coeff_2: Struct + * @y1_coeff_2.a9: filter1 coefficients A9, u8, default 0. + * @y1_coeff_2.a10: filter1 coefficients A10, u8, default 0. + * @y1_coeff_2.a11: filter1 coefficients A11, u8, default 0. + * @y1_coeff_2.a12: filter1 coefficients A12, u8, default 128. + * @y1_sign_vec: Each bit corresponds to one coefficient sign bit, + * 0: positive, 1: negative, default 0. + * @y2_coeff_0: Y2, same structure as Y1. For analyzing high frequency content. + * @y2_coeff_0.a1: filter2 coefficients A1, u8, default 0. + * @y2_coeff_0.a2: filter2 coefficients A2, u8, default 0. + * @y2_coeff_0.a3: filter2 coefficients A3, u8, default 0. + * @y2_coeff_0.a4: filter2 coefficients A4, u8, default 0. + * @y2_coeff_1: Struct + * @y2_coeff_1.a5: filter2 coefficients A5, u8, default 0. + * @y2_coeff_1.a6: filter2 coefficients A6, u8, default 0. + * @y2_coeff_1.a7: filter2 coefficients A7, u8, default 0. + * @y2_coeff_1.a8: filter2 coefficients A8, u8, default 0. + * @y2_coeff_2: Struct + * @y2_coeff_2.a9: filter1 coefficients A9, u8, default 0. + * @y2_coeff_2.a10: filter1 coefficients A10, u8, default 0. + * @y2_coeff_2.a11: filter1 coefficients A11, u8, default 0. + * @y2_coeff_2.a12: filter1 coefficients A12, u8, default 128. + * @y2_sign_vec: Each bit corresponds to one coefficient sign bit, + * 0: positive, 1: negative, default 0. + * @y_calc: Pre-processing that converts Bayer quad to RGB+Y values to be + * used for building histogram. Range [0, 32], default 8. + * Rule: + * y_gen_rate_gr + y_gen_rate_r + y_gen_rate_b + y_gen_rate_gb = 32 + * A single Y is calculated based on sum of Gr/R/B/Gb based on + * their contribution ratio. + * @y_calc.y_gen_rate_gr: Contribution ratio Gr for Y + * @y_calc.y_gen_rate_r: Contribution ratio R for Y + * @y_calc.y_gen_rate_b: Contribution ratio B for Y + * @y_calc.y_gen_rate_gb: Contribution ratio Gb for Y + * @nf: The shift right value that should be applied during the Y1/Y2 filter to + * make sure the total memory needed is 2 bytes per grid cell. + * @nf.reserved0: reserved + * @nf.y1_nf: Normalization factor for the convolution coeffs of y1, + * should be log2 of the sum of the abs values of the filter + * coeffs, default 7 (2^7 = 128). + * @nf.reserved1: reserved + * @nf.y2_nf: Normalization factor for y2, should be log2 of the sum of the + * abs values of the filter coeffs. 
+ * @nf.reserved2: reserved + */ +struct ipu3_uapi_af_filter_config { + struct { + __u8 a1; + __u8 a2; + __u8 a3; + __u8 a4; + } y1_coeff_0; + struct { + __u8 a5; + __u8 a6; + __u8 a7; + __u8 a8; + } y1_coeff_1; + struct { + __u8 a9; + __u8 a10; + __u8 a11; + __u8 a12; + } y1_coeff_2; + + __u32 y1_sign_vec; + + struct { + __u8 a1; + __u8 a2; + __u8 a3; + __u8 a4; + } y2_coeff_0; + struct { + __u8 a5; + __u8 a6; + __u8 a7; + __u8 a8; + } y2_coeff_1; + struct { + __u8 a9; + __u8 a10; + __u8 a11; + __u8 a12; + } y2_coeff_2; + + __u32 y2_sign_vec; + + struct { + __u8 y_gen_rate_gr; + __u8 y_gen_rate_r; + __u8 y_gen_rate_b; + __u8 y_gen_rate_gb; + } y_calc; + + struct { + __u32 reserved0:8; + __u32 y1_nf:4; + __u32 reserved1:4; + __u32 y2_nf:4; + __u32 reserved2:12; + } nf; +} __attribute__((packed)); + +#define IPU3_UAPI_AF_MAX_SETS 24 +#define IPU3_UAPI_AF_MD_ITEM_SIZE 4 +#define IPU3_UAPI_AF_SPARE_FOR_BUBBLES \ + (IPU3_UAPI_MAX_BUBBLE_SIZE * IPU3_UAPI_MAX_STRIPES * \ + IPU3_UAPI_AF_MD_ITEM_SIZE) +#define IPU3_UAPI_AF_Y_TABLE_SET_SIZE 128 +#define IPU3_UAPI_AF_Y_TABLE_MAX_SIZE \ + (IPU3_UAPI_AF_MAX_SETS * \ + (IPU3_UAPI_AF_Y_TABLE_SET_SIZE + IPU3_UAPI_AF_SPARE_FOR_BUBBLES) * \ + IPU3_UAPI_MAX_STRIPES) + +/** + * struct ipu3_uapi_af_raw_buffer - AF meta data + * + * @y_table: Each color component will be convolved separately with filter1 + * and filter2 and the result will be summed out and averaged for + * each cell. + */ +struct ipu3_uapi_af_raw_buffer { + __u8 y_table[IPU3_UAPI_AF_Y_TABLE_MAX_SIZE] __attribute__((aligned(32))); +} __attribute__((packed)); + +/** + * struct ipu3_uapi_af_config_s - AF config + * + * @filter_config: AF uses Y1 and Y2 filters as configured in + * &ipu3_uapi_af_filter_config + * @padding: paddings + * @grid_cfg: See &ipu3_uapi_grid_config, default resolution 16x16. Use large + * grid size for large image and vice versa. + */ +struct ipu3_uapi_af_config_s { + struct ipu3_uapi_af_filter_config filter_config __attribute__((aligned(32))); + __u8 padding[4]; + struct ipu3_uapi_grid_config grid_cfg __attribute__((aligned(32))); +} __attribute__((packed)); + +#define IPU3_UAPI_AWB_FR_MAX_SETS 24 +#define IPU3_UAPI_AWB_FR_MD_ITEM_SIZE 8 +#define IPU3_UAPI_AWB_FR_BAYER_TBL_SIZE 256 +#define IPU3_UAPI_AWB_FR_SPARE_FOR_BUBBLES \ + (IPU3_UAPI_MAX_BUBBLE_SIZE * IPU3_UAPI_MAX_STRIPES * \ + IPU3_UAPI_AWB_FR_MD_ITEM_SIZE) +#define IPU3_UAPI_AWB_FR_BAYER_TABLE_MAX_SIZE \ + (IPU3_UAPI_AWB_FR_MAX_SETS * \ + (IPU3_UAPI_AWB_FR_BAYER_TBL_SIZE + \ + IPU3_UAPI_AWB_FR_SPARE_FOR_BUBBLES) * IPU3_UAPI_MAX_STRIPES) + +/** + * struct ipu3_uapi_awb_fr_raw_buffer - AWB filter response meta data + * + * @meta_data: Statistics output on the grid after convolving with 1D filter. + */ +struct ipu3_uapi_awb_fr_raw_buffer { + __u8 meta_data[IPU3_UAPI_AWB_FR_BAYER_TABLE_MAX_SIZE] + __attribute__((aligned(32))); +} __attribute__((packed)); + +/** + * struct ipu3_uapi_awb_fr_config_s - AWB filter response config + * + * @grid_cfg: grid config, default 16x16. + * @bayer_coeff: 1D Filter 1x11 center symmetry/anti-symmetry. + * coefficients defaults { 0, 0, 0, 0, 0, 128 }. + * Applied on whole image for each Bayer channel separately + * by a weighted sum of its 11x1 neighbors. + * @reserved1: reserved + * @bayer_sign: sign of filter coefficients, default 0. + * @bayer_nf: normalization factor for the convolution coeffs, to make sure + * total memory needed is within pre-determined range. + * NF should be the log2 of the sum of the abs values of the + * filter coeffs, range [7, 14], default 7. 
+ * @reserved2: reserved + */ +struct ipu3_uapi_awb_fr_config_s { + struct ipu3_uapi_grid_config grid_cfg; + __u8 bayer_coeff[6]; + __u16 reserved1; + __u32 bayer_sign; + __u8 bayer_nf; + __u8 reserved2[7]; +} __attribute__((packed)); + +/** + * struct ipu3_uapi_4a_config - 4A config + * + * @awb_config: &ipu3_uapi_awb_config_s, default resolution 16x16 + * @ae_grd_config: auto exposure statistics &ipu3_uapi_ae_grid_config + * @padding: paddings + * @af_config: auto focus config &ipu3_uapi_af_config_s + * @awb_fr_config: &ipu3_uapi_awb_fr_config_s, default resolution 16x16 + */ +struct ipu3_uapi_4a_config { + struct ipu3_uapi_awb_config_s awb_config __attribute__((aligned(32))); + struct ipu3_uapi_ae_grid_config ae_grd_config; + __u8 padding[20]; + struct ipu3_uapi_af_config_s af_config; + struct ipu3_uapi_awb_fr_config_s awb_fr_config + __attribute__((aligned(32))); +} __attribute__((packed)); + +/** + * struct ipu3_uapi_bubble_info - Bubble info for host side debugging + * + * @num_of_stripes: A single frame is divided into several parts called stripes + * due to limitation on line buffer memory. + * The separation between the stripes is vertical. Each such + * stripe is processed as a single frame by the ISP pipe. + * @padding: padding bytes. + * @num_sets: number of sets. + * @padding1: padding bytes. + * @size_of_set: set size. + * @padding2: padding bytes. + * @bubble_size: is the amount of padding in the bubble expressed in "sets". + * @padding3: padding bytes. + */ +struct ipu3_uapi_bubble_info { + __u32 num_of_stripes __attribute__((aligned(32))); + __u8 padding[28]; + __u32 num_sets; + __u8 padding1[28]; + __u32 size_of_set; + __u8 padding2[28]; + __u32 bubble_size; + __u8 padding3[28]; +} __attribute__((packed)); + +/* + * struct ipu3_uapi_stats_3a_bubble_info_per_stripe + */ +struct ipu3_uapi_stats_3a_bubble_info_per_stripe { + struct ipu3_uapi_bubble_info awb[IPU3_UAPI_MAX_STRIPES]; + struct ipu3_uapi_bubble_info af[IPU3_UAPI_MAX_STRIPES]; + struct ipu3_uapi_bubble_info awb_fr[IPU3_UAPI_MAX_STRIPES]; +} __attribute__((packed)); + +/** + * struct ipu3_uapi_ff_status - Enable bits for each 3A fixed function + * + * @awb_en: auto white balance enable + * @padding: padding config + * @ae_en: auto exposure enable + * @padding1: padding config + * @af_en: auto focus enable + * @padding2: padding config + * @awb_fr_en: awb filter response enable bit + * @padding3: padding config + */ +struct ipu3_uapi_ff_status { + __u32 awb_en __attribute__((aligned(32))); + __u8 padding[28]; + __u32 ae_en; + __u8 padding1[28]; + __u32 af_en; + __u8 padding2[28]; + __u32 awb_fr_en; + __u8 padding3[28]; +} __attribute__((packed)); + +/** + * struct ipu3_uapi_stats_3a - 3A statistics + * + * @awb_raw_buffer: auto white balance meta data &ipu3_uapi_awb_raw_buffer + * @ae_raw_buffer: auto exposure raw data &ipu3_uapi_ae_raw_buffer_aligned + * current Imgu does not output the auto exposure statistics + * to ae_raw_buffer, the user such as 3A algorithm can use the + * RGB table in &ipu3_uapi_awb_raw_buffer to do auto-exposure. + * @af_raw_buffer: &ipu3_uapi_af_raw_buffer for auto focus meta data + * @awb_fr_raw_buffer: value as specified by &ipu3_uapi_awb_fr_raw_buffer + * @stats_4a_config: 4a statistics config as defined by &ipu3_uapi_4a_config. + * @ae_join_buffers: 1 to use ae_raw_buffer. 
+ * @padding: padding config
+ * @stats_3a_bubble_per_stripe: a &ipu3_uapi_stats_3a_bubble_info_per_stripe
+ * @stats_3a_status: 3a statistics status set in &ipu3_uapi_ff_status
+ */
+struct ipu3_uapi_stats_3a {
+	struct ipu3_uapi_awb_raw_buffer awb_raw_buffer;
+	struct ipu3_uapi_ae_raw_buffer_aligned
+		ae_raw_buffer[IPU3_UAPI_MAX_STRIPES];
+	struct ipu3_uapi_af_raw_buffer af_raw_buffer;
+	struct ipu3_uapi_awb_fr_raw_buffer awb_fr_raw_buffer;
+	struct ipu3_uapi_4a_config stats_4a_config;
+	__u32 ae_join_buffers;
+	__u8 padding[28];
+	struct ipu3_uapi_stats_3a_bubble_info_per_stripe
+		stats_3a_bubble_per_stripe;
+	struct ipu3_uapi_ff_status stats_3a_status;
+} __attribute__((packed));
+
+/******************* ipu3_uapi_acc_param *******************/
+
+#define IPU3_UAPI_ISP_VEC_ELEMS 64
+#define IPU3_UAPI_ISP_TNR3_VMEM_LEN 9
+
+#define IPU3_UAPI_BNR_LUT_SIZE 32
+
+/* number of elements in gamma correction LUT */
+#define IPU3_UAPI_GAMMA_CORR_LUT_ENTRIES 256
+
+/* largest grid is 73x56, for grid_height_per_slice of 2, 73x2 = 146 */
+#define IPU3_UAPI_SHD_MAX_CELLS_PER_SET 146
+#define IPU3_UAPI_SHD_MAX_CFG_SETS 28
+/* Normalization shift aka nf */
+#define IPU3_UAPI_SHD_BLGR_NF_SHIFT 13
+#define IPU3_UAPI_SHD_BLGR_NF_MASK 7
+
+#define IPU3_UAPI_YUVP2_TCC_MACC_TABLE_ELEMENTS 16
+#define IPU3_UAPI_YUVP2_TCC_INV_Y_LUT_ELEMENTS 14
+#define IPU3_UAPI_YUVP2_TCC_GAIN_PCWL_LUT_ELEMENTS 258
+#define IPU3_UAPI_YUVP2_TCC_R_SQR_LUT_ELEMENTS 24
+
+#define IPU3_UAPI_ANR_LUT_SIZE 26
+#define IPU3_UAPI_ANR_PYRAMID_SIZE 22
+
+#define IPU3_UAPI_LIN_LUT_SIZE 64
+
+/* Bayer Noise Reduction related structs */
+
+/**
+ * struct ipu3_uapi_bnr_static_config_wb_gains_config - White balance gains
+ *
+ * @gr: white balance gain for Gr channel.
+ * @r: white balance gain for R channel.
+ * @b: white balance gain for B channel.
+ * @gb: white balance gain for Gb channel.
+ *
+ * For BNR parameters WB gain factor for the four channels [Ggr, Ggb, Gb, Gr].
+ * Their precision is U3.13 and the range is (0, 8); the actual gain is
+ * Gx + 1, typically Gx = 1.
+ *
+ * Pout = {Pin * (1 + Gx)}.
+ */
+struct ipu3_uapi_bnr_static_config_wb_gains_config {
+	__u16 gr;
+	__u16 r;
+	__u16 b;
+	__u16 gb;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_bnr_static_config_wb_gains_thr_config - Threshold config
+ *
+ * @gr: white balance threshold gain for Gr channel.
+ * @r: white balance threshold gain for R channel.
+ * @b: white balance threshold gain for B channel.
+ * @gb: white balance threshold gain for Gb channel.
+ *
+ * Defines the threshold that specifies how different a defect pixel can be
+ * from its neighbors (used by the dynamic defect pixel correction sub block).
+ * Precision u4.4, range [0, 8].
+ */
+struct ipu3_uapi_bnr_static_config_wb_gains_thr_config {
+	__u8 gr;
+	__u8 r;
+	__u8 b;
+	__u8 gb;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_bnr_static_config_thr_coeffs_config - Noise model
+ * coefficients that control the noise threshold
+ *
+ * @cf: Free coefficient for threshold calculation, range [0, 8191], default 0.
+ * @reserved0: reserved
+ * @cg: Gain coefficient for threshold calculation, [0, 31], default 8.
+ * @ci: Intensity coefficient for threshold calculation, range [0, 0x1f],
+ * default 6.
+ * format: u3.2 (3 most significant bits represent whole number,
+ * 2 least significant bits represent the fractional part
+ * with each count representing 0.25)
+ * e.g. 6 in binary format is 00110, which translates to 1.5
+ * @reserved1: reserved
+ * @r_nf: Normalization shift value for r^2 calculation, range [12, 20],
+ * where r is a radius of pixel [row, col] from center of sensor.
+ * default 14.
+ *
+ * Threshold used to distinguish between noise and details.
+ */
+struct ipu3_uapi_bnr_static_config_thr_coeffs_config {
+	__u32 cf:13;
+	__u32 reserved0:3;
+	__u32 cg:5;
+	__u32 ci:5;
+	__u32 reserved1:1;
+	__u32 r_nf:5;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_bnr_static_config_thr_ctrl_shd_config - Shading config
+ *
+ * @gr: Coefficient defines lens shading gain approximation for gr channel
+ * @r: Coefficient defines lens shading gain approximation for r channel
+ * @b: Coefficient defines lens shading gain approximation for b channel
+ * @gb: Coefficient defines lens shading gain approximation for gb channel
+ *
+ * Parameters for noise model (NM) adaptation of BNR due to shading correction.
+ * All above have precision of u3.3, default to 0.
+ */
+struct ipu3_uapi_bnr_static_config_thr_ctrl_shd_config {
+	__u8 gr;
+	__u8 r;
+	__u8 b;
+	__u8 gb;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_bnr_static_config_opt_center_config - Optical center config
+ *
+ * @x_reset: Reset value of X (col start - X center). Precision s12.0.
+ * @reserved0: reserved
+ * @y_reset: Reset value of Y (row start - Y center). Precision s12.0.
+ * @reserved2: reserved
+ *
+ * Distance from corner to optical center for NM adaptation due to shading
+ * correction (should be calculated based on shading tables)
+ */
+struct ipu3_uapi_bnr_static_config_opt_center_config {
+	__s32 x_reset:13;
+	__u32 reserved0:3;
+	__s32 y_reset:13;
+	__u32 reserved2:3;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_bnr_static_config_lut_config - BNR square root lookup table
+ *
+ * @values: pre-calculated values of square root function.
+ *
+ * LUT implementation of square root operation.
+ */
+struct ipu3_uapi_bnr_static_config_lut_config {
+	__u8 values[IPU3_UAPI_BNR_LUT_SIZE];
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_bnr_static_config_bp_ctrl_config - Detect bad pixels (bp)
+ *
+ * @bp_thr_gain: Defines the threshold that specifies how different a
+ * defect pixel can be from its neighbors. Threshold is
+ * dependent on de-noise threshold calculated by algorithm.
+ * Range [4, 31], default 4.
+ * @reserved0: reserved
+ * @defect_mode: Mode of addressed defect pixels,
+ * 0 - single defect pixel is expected,
+ * 1 - 2 adjacent defect pixels are expected, default 1.
+ * @bp_gain: Defines how a 2nd derivation that passes through a defect pixel
+ * is different from 2nd derivations that pass through
+ * neighbor pixels. u4.2, range [0, 256], default 8.
+ * @reserved1: reserved
+ * @w0_coeff: Blending coefficient of defect pixel correction.
+ * Precision u4, range [0, 8], default 8.
+ * @reserved2: reserved
+ * @w1_coeff: Enable influence of incorrect defect pixel correction to be
+ * avoided. Precision u4, range [1, 8], default 8.
+ * @reserved3: reserved
+ */
+struct ipu3_uapi_bnr_static_config_bp_ctrl_config {
+	__u32 bp_thr_gain:5;
+	__u32 reserved0:2;
+	__u32 defect_mode:1;
+	__u32 bp_gain:6;
+	__u32 reserved1:18;
+	__u32 w0_coeff:4;
+	__u32 reserved2:4;
+	__u32 w1_coeff:4;
+	__u32 reserved3:20;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_bnr_static_config_dn_detect_ctrl_config - Denoising config
+ *
+ * @alpha: Weight of central element of smoothing filter.
+ * @beta: Weight of peripheral elements of smoothing filter, default 4.
+ * @gamma: Weight of diagonal elements of smoothing filter, default 4.
+ *
+ * The beta and gamma parameters define the strength of the noise removal
+ * filter.
+ *	All of the above have precision u0.4, range [0, 0xf]
+ *	format: u0.4 (no / zero bits represent the whole number,
+ *	4 bits represent the fractional part,
+ *	with each count representing 0.0625)
+ *	e.g. 0xf translates to 0.0625x15 = 0.9375
+ *
+ * @reserved0: reserved
+ * @max_inf: Maximum increase of peripheral or diagonal element influence
+ *	relative to the pre-defined value range: [0x5, 0xa]
+ * @reserved1: reserved
+ * @gd_enable: Green disparity enable control, 0 - disable, 1 - enable.
+ * @bpc_enable: Bad pixel correction enable control, 0 - disable, 1 - enable.
+ * @bnr_enable: Bayer noise removal enable control, 0 - disable, 1 - enable.
+ * @ff_enable: Fixed function enable, 0 - disable, 1 - enable.
+ * @reserved2: reserved
+ */
+struct ipu3_uapi_bnr_static_config_dn_detect_ctrl_config {
+	__u32 alpha:4;
+	__u32 beta:4;
+	__u32 gamma:4;
+	__u32 reserved0:4;
+	__u32 max_inf:4;
+	__u32 reserved1:7;
+	__u32 gd_enable:1;
+	__u32 bpc_enable:1;
+	__u32 bnr_enable:1;
+	__u32 ff_enable:1;
+	__u32 reserved2:1;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_bnr_static_config_opt_center_sqr_config - BNR optical square
+ *
+ * @x_sqr_reset: Reset value of X^2.
+ * @y_sqr_reset: Reset value of Y^2.
+ *
+ * Please note:
+ *
+ * #. X and Y refer to
+ *    &ipu3_uapi_bnr_static_config_opt_center_config
+ * #. Both structs are used in the threshold formula to calculate r^2, where r
+ *    is the radius of pixel [row, col] from the center of the sensor.
+ */
+struct ipu3_uapi_bnr_static_config_opt_center_sqr_config {
+	__u32 x_sqr_reset;
+	__u32 y_sqr_reset;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_bnr_static_config - BNR static config
+ *
+ * @wb_gains: white balance gains &ipu3_uapi_bnr_static_config_wb_gains_config
+ * @wb_gains_thr: white balance gains threshold as defined by
+ *	&ipu3_uapi_bnr_static_config_wb_gains_thr_config
+ * @thr_coeffs: coefficients of threshold
+ *	&ipu3_uapi_bnr_static_config_thr_coeffs_config
+ * @thr_ctrl_shd: control of shading threshold
+ *	&ipu3_uapi_bnr_static_config_thr_ctrl_shd_config
+ * @opt_center: optical center &ipu3_uapi_bnr_static_config_opt_center_config
+ *
+ * Above parameters and opt_center_sqr are used for white balance and shading.
+ *
+ * @lut: lookup table &ipu3_uapi_bnr_static_config_lut_config
+ * @bp_ctrl: detect and remove bad pixels as defined in struct
+ *	&ipu3_uapi_bnr_static_config_bp_ctrl_config
+ * @dn_detect_ctrl: detect and remove noise.
+ *	&ipu3_uapi_bnr_static_config_dn_detect_ctrl_config
+ * @column_size: The number of pixels in column.
+ * @opt_center_sqr: Reset value of r^2 to optical center, see
+ *	&ipu3_uapi_bnr_static_config_opt_center_sqr_config.
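+ *
+ * As a worked example of the fixed-point encodings used throughout this
+ * header, take the U3.13 white balance gains in @wb_gains: the stored
+ * 16-bit value is Gx scaled by 2^13 and the applied gain is 1 + Gx, so a
+ * register value of 8192 encodes Gx = 1.0, i.e. an applied gain of 2.0.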
+ */ +struct ipu3_uapi_bnr_static_config { + struct ipu3_uapi_bnr_static_config_wb_gains_config wb_gains; + struct ipu3_uapi_bnr_static_config_wb_gains_thr_config wb_gains_thr; + struct ipu3_uapi_bnr_static_config_thr_coeffs_config thr_coeffs; + struct ipu3_uapi_bnr_static_config_thr_ctrl_shd_config thr_ctrl_shd; + struct ipu3_uapi_bnr_static_config_opt_center_config opt_center; + struct ipu3_uapi_bnr_static_config_lut_config lut; + struct ipu3_uapi_bnr_static_config_bp_ctrl_config bp_ctrl; + struct ipu3_uapi_bnr_static_config_dn_detect_ctrl_config dn_detect_ctrl; + __u32 column_size; + struct ipu3_uapi_bnr_static_config_opt_center_sqr_config opt_center_sqr; +} __attribute__((packed)); + +/** + * struct ipu3_uapi_bnr_static_config_green_disparity - Correct green disparity + * + * @gd_red: Shading gain coeff for gr disparity level in bright red region. + * Precision u0.6, default 4(0.0625). + * @reserved0: reserved + * @gd_green: Shading gain coeff for gr disparity level in bright green + * region. Precision u0.6, default 4(0.0625). + * @reserved1: reserved + * @gd_blue: Shading gain coeff for gr disparity level in bright blue region. + * Precision u0.6, default 4(0.0625). + * @reserved2: reserved + * @gd_black: Maximal green disparity level in dark region (stronger disparity + * assumed to be image detail). Precision u14, default 80. + * @reserved3: reserved + * @gd_shading: Change maximal green disparity level according to square + * distance from image center. + * @reserved4: reserved + * @gd_support: Lower bound for the number of second green color pixels in + * current pixel neighborhood with less than threshold difference + * from it. + * + * The shading gain coeff of red, green, blue and black are used to calculate + * threshold given a pixel's color value and its coordinates in the image. + * + * @reserved5: reserved + * @gd_clip: Turn green disparity clip on/off, [0, 1], default 1. + * @gd_central_weight: Central pixel weight in 9 pixels weighted sum. + */ +struct ipu3_uapi_bnr_static_config_green_disparity { + __u32 gd_red:6; + __u32 reserved0:2; + __u32 gd_green:6; + __u32 reserved1:2; + __u32 gd_blue:6; + __u32 reserved2:10; + __u32 gd_black:14; + __u32 reserved3:2; + __u32 gd_shading:7; + __u32 reserved4:1; + __u32 gd_support:2; + __u32 reserved5:1; + __u32 gd_clip:1; + __u32 gd_central_weight:4; +} __attribute__((packed)); + +/** + * struct ipu3_uapi_dm_config - De-mosaic parameters + * + * @dm_en: de-mosaic enable. + * @ch_ar_en: Checker artifacts removal enable flag. Default 0. + * @fcc_en: False color correction (FCC) enable flag. Default 0. + * @reserved0: reserved + * @frame_width: do not care + * @gamma_sc: Sharpening coefficient (coefficient of 2-d derivation of + * complementary color in Hamilton-Adams interpolation). + * u5, range [0, 31], default 8. + * @reserved1: reserved + * @lc_ctrl: Parameter that controls weights of Chroma Homogeneity metric + * in calculation of final homogeneity metric. + * u5, range [0, 31], default 7. + * @reserved2: reserved + * @cr_param1: First parameter that defines Checker artifact removal + * feature gain. Precision u5, range [0, 31], default 8. + * @reserved3: reserved + * @cr_param2: Second parameter that defines Checker artifact removal + * feature gain. Precision u5, range [0, 31], default 8. + * @reserved4: reserved + * @coring_param: Defines power of false color correction operation. + * low for preserving edge colors, high for preserving gray + * edge artifacts. + * Precision u1.4, range [0, 1.9375], default 4 (0.25). 
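+ *	(u1.4: each count is 1/16, so the default 4 encodes 4/16 = 0.25)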
+ * @reserved5: reserved
+ *
+ * The demosaic fixed function block is responsible for converting
+ * Bayer (mosaiced) images into color images based on a demosaicing algorithm.
+ */
+struct ipu3_uapi_dm_config {
+	__u32 dm_en:1;
+	__u32 ch_ar_en:1;
+	__u32 fcc_en:1;
+	__u32 reserved0:13;
+	__u32 frame_width:16;
+
+	__u32 gamma_sc:5;
+	__u32 reserved1:3;
+	__u32 lc_ctrl:5;
+	__u32 reserved2:3;
+	__u32 cr_param1:5;
+	__u32 reserved3:3;
+	__u32 cr_param2:5;
+	__u32 reserved4:3;
+
+	__u32 coring_param:5;
+	__u32 reserved5:27;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_ccm_mat_config - Color correction matrix
+ *
+ * @coeff_m11: CCM 3x3 coefficient, range [-65536, 65535]
+ * @coeff_m12: CCM 3x3 coefficient, range [-8192, 8191]
+ * @coeff_m13: CCM 3x3 coefficient, range [-32768, 32767]
+ * @coeff_o_r: Bias 3x1 coefficient, range [-8191, 8181]
+ * @coeff_m21: CCM 3x3 coefficient, range [-32767, 32767]
+ * @coeff_m22: CCM 3x3 coefficient, range [-8192, 8191]
+ * @coeff_m23: CCM 3x3 coefficient, range [-32768, 32767]
+ * @coeff_o_g: Bias 3x1 coefficient, range [-8191, 8181]
+ * @coeff_m31: CCM 3x3 coefficient, range [-32768, 32767]
+ * @coeff_m32: CCM 3x3 coefficient, range [-8192, 8191]
+ * @coeff_m33: CCM 3x3 coefficient, range [-32768, 32767]
+ * @coeff_o_b: Bias 3x1 coefficient, range [-8191, 8181]
+ *
+ * Transform the sensor-specific color space to standard sRGB by applying a
+ * 3x3 matrix and adding a bias vector O. The transformation is basically a
+ * rotation and translation in the 3-dimensional color spaces. Here are the
+ * defaults:
+ *
+ *	9775,	-2671,	1087,	0
+ *	-1071,	8303,	815,	0
+ *	-23,	-7887,	16103,	0
+ */
+struct ipu3_uapi_ccm_mat_config {
+	__s16 coeff_m11;
+	__s16 coeff_m12;
+	__s16 coeff_m13;
+	__s16 coeff_o_r;
+	__s16 coeff_m21;
+	__s16 coeff_m22;
+	__s16 coeff_m23;
+	__s16 coeff_o_g;
+	__s16 coeff_m31;
+	__s16 coeff_m32;
+	__s16 coeff_m33;
+	__s16 coeff_o_b;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_gamma_corr_ctrl - Gamma correction
+ *
+ * @enable: gamma correction enable.
+ * @reserved: reserved
+ */
+struct ipu3_uapi_gamma_corr_ctrl {
+	__u32 enable:1;
+	__u32 reserved:31;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_gamma_corr_lut - Per-pixel tone mapping implemented as LUT.
+ *
+ * @lut: 256 tabulated values of the gamma function. LUT[1]..LUT[256],
+ *	format u13.0, range [0, 8191].
+ *
+ * The tone mapping operation is done by a piecewise linear graph
+ * that is implemented as a lookup table (LUT). The pixel component input
+ * intensity is the X-axis of the graph, which is the table entry.
+ */
+struct ipu3_uapi_gamma_corr_lut {
+	__u16 lut[IPU3_UAPI_GAMMA_CORR_LUT_ENTRIES];
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_gamma_config - Gamma config
+ *
+ * @gc_ctrl: control of gamma correction &ipu3_uapi_gamma_corr_ctrl
+ * @gc_lut: lookup table of gamma correction &ipu3_uapi_gamma_corr_lut
+ */
+struct ipu3_uapi_gamma_config {
+	struct ipu3_uapi_gamma_corr_ctrl gc_ctrl __attribute__((aligned(32)));
+	struct ipu3_uapi_gamma_corr_lut gc_lut __attribute__((aligned(32)));
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_csc_mat_config - Color space conversion matrix config
+ *
+ * @coeff_c11: Conversion matrix value, format s0.14, range [-16384, 16383].
+ * @coeff_c12: Conversion matrix value, format s0.14, range [-8192, 8191].
+ * @coeff_c13: Conversion matrix value, format s0.14, range [-16384, 16383].
+ * @coeff_b1: Bias 3x1 coefficient, s13.0 range [-8192, 8191].
+ * @coeff_c21: Conversion matrix value, format s0.14, range [-16384, 16383].
+ * @coeff_c22: Conversion matrix value, format s0.14, range [-8192, 8191].
+ * @coeff_c23: Conversion matrix value, format s0.14, range [-16384, 16383].
+ * @coeff_b2: Bias 3x1 coefficient, s13.0 range [-8192, 8191].
+ * @coeff_c31: Conversion matrix value, format s0.14, range [-16384, 16383].
+ * @coeff_c32: Conversion matrix value, format s0.14, range [-8192, 8191].
+ * @coeff_c33: Conversion matrix value, format s0.14, range [-16384, 16383].
+ * @coeff_b3: Bias 3x1 coefficient, s13.0 range [-8192, 8191].
+ *
+ * Each pixel is transformed from RGB to YUV (Y - brightness/luminance,
+ * UV - chroma) by multiplying the pixel's values by a 3x3 matrix and adding
+ * an optional 3x1 bias vector. Here are the default values for the matrix:
+ *
+ *	4898,	9617,	1867,	0,
+ *	-2410,	-4732,	7143,	0,
+ *	10076,	-8437,	-1638,	0,
+ *
+ * (i.e. for the real number 0.299, 0.299 * 2^14 becomes 4898.)
+ */
+struct ipu3_uapi_csc_mat_config {
+	__s16 coeff_c11;
+	__s16 coeff_c12;
+	__s16 coeff_c13;
+	__s16 coeff_b1;
+	__s16 coeff_c21;
+	__s16 coeff_c22;
+	__s16 coeff_c23;
+	__s16 coeff_b2;
+	__s16 coeff_c31;
+	__s16 coeff_c32;
+	__s16 coeff_c33;
+	__s16 coeff_b3;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_cds_params - Chroma down-scaling
+ *
+ * @ds_c00: range [0, 3]
+ * @ds_c01: range [0, 3]
+ * @ds_c02: range [0, 3]
+ * @ds_c03: range [0, 3]
+ * @ds_c10: range [0, 3]
+ * @ds_c11: range [0, 3]
+ * @ds_c12: range [0, 3]
+ * @ds_c13: range [0, 3]
+ *
+ * If the user does not provide values, the above 4x2 filter uses the
+ * following defaults:
+ *	1, 3, 3, 1,
+ *	1, 3, 3, 1,
+ *
+ * @ds_nf: Normalization factor for Chroma output downscaling filter,
+ *	range [0, 4], default 2.
+ * @reserved0: reserved
+ * @csc_en: Color space conversion enable
+ * @uv_bin_output: 0: output YUV 4.2.0, 1: output YUV 4.2.2 (default).
+ * @reserved1: reserved
+ */
+struct ipu3_uapi_cds_params {
+	__u32 ds_c00:2;
+	__u32 ds_c01:2;
+	__u32 ds_c02:2;
+	__u32 ds_c03:2;
+	__u32 ds_c10:2;
+	__u32 ds_c11:2;
+	__u32 ds_c12:2;
+	__u32 ds_c13:2;
+	__u32 ds_nf:5;
+	__u32 reserved0:3;
+	__u32 csc_en:1;
+	__u32 uv_bin_output:1;
+	__u32 reserved1:6;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_shd_grid_config - Bayer shading (darkening) correction
+ *
+ * @width: Grid horizontal dimensions, u8, [8, 128], default 73
+ * @height: Grid vertical dimensions, u8, [8, 128], default 56
+ * @block_width_log2: Log2 of the width of the grid cell in pixel count,
+ *	u4, [0, 15], default value 5.
+ * @reserved0: reserved
+ * @block_height_log2: Log2 of the height of the grid cell in pixel count,
+ *	u4, [0, 15], default value 6.
+ * @reserved1: reserved
+ * @grid_height_per_slice: SHD_MAX_CELLS_PER_SET/width.
+ *	(with SHD_MAX_CELLS_PER_SET = 146).
+ * @x_start: X value of top left corner of sensor relative to ROI,
+ *	s13, [-4096, 0], default 0, only negative values.
+ * @y_start: Y value of top left corner of sensor relative to ROI,
+ *	s13, [-4096, 0], default 0, only negative values.
+ */
+struct ipu3_uapi_shd_grid_config {
+	/* reg 0 */
+	__u8 width;
+	__u8 height;
+	__u8 block_width_log2:3;
+	__u8 reserved0:1;
+	__u8 block_height_log2:3;
+	__u8 reserved1:1;
+	__u8 grid_height_per_slice;
+	/* reg 1 */
+	__s16 x_start;
+	__s16 y_start;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_shd_general_config - Shading general config
+ *
+ * @init_set_vrt_offst_ul: set vertical offset,
+ *	y_start >> block_height_log2 % grid_height_per_slice.
+ * @shd_enable: shading enable.
+ * @gain_factor: Gain factor. Shift calculated anti shading value. Precision u2.
+ *	0x0 - gain factor [1, 5], means no shift interpolated value.
+ *	0x1 - gain factor [1, 9], means shift interpolated by 1.
+ *	0x2 - gain factor [1, 17], means shift interpolated by 2.
+ * @reserved: reserved
+ *
+ * Correction is performed by applying a gain factor to each of the 4 Bayer
+ * channels as a function of the pixel location in the sensor.
+ */
+struct ipu3_uapi_shd_general_config {
+	__u32 init_set_vrt_offst_ul:8;
+	__u32 shd_enable:1;
+	__u32 gain_factor:2;
+	__u32 reserved:21;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_shd_black_level_config - Black level correction
+ *
+ * @bl_r: Bias values for red. s11 range [-2048, 2047].
+ * @bl_gr: Bias values for green red. s11 range [-2048, 2047].
+ * @bl_gb: Bias values for green blue. s11 range [-2048, 2047].
+ * @bl_b: Bias values for blue. s11 range [-2048, 2047].
+ */
+struct ipu3_uapi_shd_black_level_config {
+	__s16 bl_r;
+	__s16 bl_gr;
+	__s16 bl_gb;
+	__s16 bl_b;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_shd_config_static - Shading config static
+ *
+ * @grid: shading grid config &ipu3_uapi_shd_grid_config
+ * @general: shading general config &ipu3_uapi_shd_general_config
+ * @black_level: black level config for shading correction as defined by
+ *	&ipu3_uapi_shd_black_level_config
+ */
+struct ipu3_uapi_shd_config_static {
+	struct ipu3_uapi_shd_grid_config grid;
+	struct ipu3_uapi_shd_general_config general;
+	struct ipu3_uapi_shd_black_level_config black_level;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_shd_lut - Shading gain factor lookup table.
+ *
+ * @sets: array
+ * @sets.r_and_gr: Red and GreenR Lookup table.
+ * @sets.r_and_gr.r: Red shading factor.
+ * @sets.r_and_gr.gr: GreenR shading factor.
+ * @sets.reserved1: reserved
+ * @sets.gb_and_b: GreenB and Blue Lookup table.
+ * @sets.gb_and_b.gb: GreenB shading factor.
+ * @sets.gb_and_b.b: Blue shading factor.
+ * @sets.reserved2: reserved
+ *
+ * Map to shading correction LUT register set.
+ */
+struct ipu3_uapi_shd_lut {
+	struct {
+		struct {
+			__u16 r;
+			__u16 gr;
+		} r_and_gr[IPU3_UAPI_SHD_MAX_CELLS_PER_SET];
+		__u8 reserved1[24];
+		struct {
+			__u16 gb;
+			__u16 b;
+		} gb_and_b[IPU3_UAPI_SHD_MAX_CELLS_PER_SET];
+		__u8 reserved2[24];
+	} sets[IPU3_UAPI_SHD_MAX_CFG_SETS];
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_shd_config - Shading config
+ *
+ * @shd: shading static config, see &ipu3_uapi_shd_config_static
+ * @shd_lut: shading lookup table &ipu3_uapi_shd_lut
+ */
+struct ipu3_uapi_shd_config {
+	struct ipu3_uapi_shd_config_static shd __attribute__((aligned(32)));
+	struct ipu3_uapi_shd_lut shd_lut __attribute__((aligned(32)));
+} __attribute__((packed));
+
+/* Image Enhancement Filter directed */
+
+/**
+ * struct ipu3_uapi_iefd_cux2 - IEFd Config Unit 2 parameters
+ *
+ * @x0: X0 point of Config Unit, u9.0, default 0.
+ * @x1: X1 point of Config Unit, u9.0, default 0.
+ * @a01: Slope A of Config Unit, s4.4, default 0.
+ * @b01: Slope B, always 0.
+ *
+ * Calculate weight for blending directed and non-directed denoise elements
+ *
+ * Note:
+ * Each instance of Config Unit needs the X coordinate of n points and
+ * the slope A factor between points, calculated by the driver based on
+ * calibration parameters.
+ *
+ * All CU inputs are unsigned; they will be converted to signed when written
+ * to the register, i.e. a01 will be written to a 9 bit register in s4.4
+ * format.
+ * The data precision s4.4 means 4 bits for the integer part and 4 bits for
+ * the fractional part, with a leading bit that indicates a positive or
+ * negative value.
+ * For userspace software (commonly the imaging library), the computation for
+ * the CU slope values should be based on the slope resolution 1/16 (binary
+ * 0.0001 - the minimal interval value); the slope value range is [-256, +255].
+ * This applies to &ipu3_uapi_iefd_cux6_ed, &ipu3_uapi_iefd_cux2_1,
+ * &ipu3_uapi_iefd_cux4 and &ipu3_uapi_iefd_cux6_rad.
+ */
+struct ipu3_uapi_iefd_cux2 {
+	__u32 x0:9;
+	__u32 x1:9;
+	__u32 a01:9;
+	__u32 b01:5;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_iefd_cux6_ed - Calculate power of non-directed sharpening
+ *				element, Config Unit 6 for edge detail (ED).
+ *
+ * @x0: X coordinate of point 0, u9.0, default 0.
+ * @x1: X coordinate of point 1, u9.0, default 0.
+ * @x2: X coordinate of point 2, u9.0, default 0.
+ * @reserved0: reserved
+ * @x3: X coordinate of point 3, u9.0, default 0.
+ * @x4: X coordinate of point 4, u9.0, default 0.
+ * @x5: X coordinate of point 5, u9.0, default 0.
+ * @reserved1: reserved
+ * @a01: slope A points 01, s4.4, default 0.
+ * @a12: slope A points 12, s4.4, default 0.
+ * @a23: slope A points 23, s4.4, default 0.
+ * @reserved2: reserved
+ * @a34: slope A points 34, s4.4, default 0.
+ * @a45: slope A points 45, s4.4, default 0.
+ * @reserved3: reserved
+ * @b01: slope B points 01, s4.4, default 0.
+ * @b12: slope B points 12, s4.4, default 0.
+ * @b23: slope B points 23, s4.4, default 0.
+ * @reserved4: reserved
+ * @b34: slope B points 34, s4.4, default 0.
+ * @b45: slope B points 45, s4.4, default 0.
+ * @reserved5: reserved.
+ */
+struct ipu3_uapi_iefd_cux6_ed {
+	__u32 x0:9;
+	__u32 x1:9;
+	__u32 x2:9;
+	__u32 reserved0:5;
+
+	__u32 x3:9;
+	__u32 x4:9;
+	__u32 x5:9;
+	__u32 reserved1:5;
+
+	__u32 a01:9;
+	__u32 a12:9;
+	__u32 a23:9;
+	__u32 reserved2:5;
+
+	__u32 a34:9;
+	__u32 a45:9;
+	__u32 reserved3:14;
+
+	__u32 b01:9;
+	__u32 b12:9;
+	__u32 b23:9;
+	__u32 reserved4:5;
+
+	__u32 b34:9;
+	__u32 b45:9;
+	__u32 reserved5:14;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_iefd_cux2_1 - Calculate power of non-directed denoise
+ *				element apply.
+ * @x0: X0 point of Config Unit, u9.0, default 0.
+ * @x1: X1 point of Config Unit, u9.0, default 0.
+ * @a01: Slope A of Config Unit, s4.4, default 0.
+ * @reserved1: reserved
+ * @b01: offset B0 of Config Unit, u7.0, default 0.
+ * @reserved2: reserved
+ */
+struct ipu3_uapi_iefd_cux2_1 {
+	__u32 x0:9;
+	__u32 x1:9;
+	__u32 a01:9;
+	__u32 reserved1:5;
+
+	__u32 b01:8;
+	__u32 reserved2:24;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_iefd_cux4 - Calculate power of non-directed sharpening
+ *				element.
+ *
+ * @x0: X0 point of Config Unit, u9.0, default 0.
+ * @x1: X1 point of Config Unit, u9.0, default 0.
+ * @x2: X2 point of Config Unit, u9.0, default 0.
+ * @reserved0: reserved
+ * @x3: X3 point of Config Unit, u9.0, default 0.
+ * @a01: Slope A0 of Config Unit, s4.4, default 0.
+ * @a12: Slope A1 of Config Unit, s4.4, default 0.
+ * @reserved1: reserved
+ * @a23: Slope A2 of Config Unit, s4.4, default 0.
+ * @b01: Offset B0 of Config Unit, s7.0, default 0.
+ * @b12: Offset B1 of Config Unit, s7.0, default 0.
+ * @reserved2: reserved
+ * @b23: Offset B2 of Config Unit, s7.0, default 0.
+ * @reserved3: reserved + */ +struct ipu3_uapi_iefd_cux4 { + __u32 x0:9; + __u32 x1:9; + __u32 x2:9; + __u32 reserved0:5; + + __u32 x3:9; + __u32 a01:9; + __u32 a12:9; + __u32 reserved1:5; + + __u32 a23:9; + __u32 b01:8; + __u32 b12:8; + __u32 reserved2:7; + + __u32 b23:8; + __u32 reserved3:24; +} __attribute__((packed)); + +/** + * struct ipu3_uapi_iefd_cux6_rad - Radial Config Unit (CU) + * + * @x0: x0 points of Config Unit radial, u8.0 + * @x1: x1 points of Config Unit radial, u8.0 + * @x2: x2 points of Config Unit radial, u8.0 + * @x3: x3 points of Config Unit radial, u8.0 + * @x4: x4 points of Config Unit radial, u8.0 + * @x5: x5 points of Config Unit radial, u8.0 + * @reserved1: reserved + * @a01: Slope A of Config Unit radial, s7.8 + * @a12: Slope A of Config Unit radial, s7.8 + * @a23: Slope A of Config Unit radial, s7.8 + * @a34: Slope A of Config Unit radial, s7.8 + * @a45: Slope A of Config Unit radial, s7.8 + * @reserved2: reserved + * @b01: Slope B of Config Unit radial, s9.0 + * @b12: Slope B of Config Unit radial, s9.0 + * @b23: Slope B of Config Unit radial, s9.0 + * @reserved4: reserved + * @b34: Slope B of Config Unit radial, s9.0 + * @b45: Slope B of Config Unit radial, s9.0 + * @reserved5: reserved + */ +struct ipu3_uapi_iefd_cux6_rad { + __u32 x0:8; + __u32 x1:8; + __u32 x2:8; + __u32 x3:8; + + __u32 x4:8; + __u32 x5:8; + __u32 reserved1:16; + + __u32 a01:16; + __u32 a12:16; + + __u32 a23:16; + __u32 a34:16; + + __u32 a45:16; + __u32 reserved2:16; + + __u32 b01:10; + __u32 b12:10; + __u32 b23:10; + __u32 reserved4:2; + + __u32 b34:10; + __u32 b45:10; + __u32 reserved5:12; +} __attribute__((packed)); + +/** + * struct ipu3_uapi_yuvp1_iefd_cfg_units - IEFd Config Units parameters + * + * @cu_1: calculate weight for blending directed and + * non-directed denoise elements. See &ipu3_uapi_iefd_cux2 + * @cu_ed: calculate power of non-directed sharpening element, see + * &ipu3_uapi_iefd_cux6_ed + * @cu_3: calculate weight for blending directed and + * non-directed denoise elements. A &ipu3_uapi_iefd_cux2 + * @cu_5: calculate power of non-directed denoise element apply, use + * &ipu3_uapi_iefd_cux2_1 + * @cu_6: calculate power of non-directed sharpening element. See + * &ipu3_uapi_iefd_cux4 + * @cu_7: calculate weight for blending directed and + * non-directed denoise elements. Use &ipu3_uapi_iefd_cux2 + * @cu_unsharp: Config Unit of unsharp &ipu3_uapi_iefd_cux4 + * @cu_radial: Config Unit of radial &ipu3_uapi_iefd_cux6_rad + * @cu_vssnlm: Config Unit of vssnlm &ipu3_uapi_iefd_cux2 + */ +struct ipu3_uapi_yuvp1_iefd_cfg_units { + struct ipu3_uapi_iefd_cux2 cu_1; + struct ipu3_uapi_iefd_cux6_ed cu_ed; + struct ipu3_uapi_iefd_cux2 cu_3; + struct ipu3_uapi_iefd_cux2_1 cu_5; + struct ipu3_uapi_iefd_cux4 cu_6; + struct ipu3_uapi_iefd_cux2 cu_7; + struct ipu3_uapi_iefd_cux4 cu_unsharp; + struct ipu3_uapi_iefd_cux6_rad cu_radial; + struct ipu3_uapi_iefd_cux2 cu_vssnlm; +} __attribute__((packed)); + +/** + * struct ipu3_uapi_yuvp1_iefd_config_s - IEFd config + * + * @horver_diag_coeff: Gradient compensation. Compared with vertical / + * horizontal (0 / 90 degree), coefficient of diagonal (45 / + * 135 degree) direction should be corrected by approx. + * 1/sqrt(2). 
+ * @reserved0: reserved + * @clamp_stitch: Slope to stitch between clamped and unclamped edge values + * @reserved1: reserved + * @direct_metric_update: Update coeff for direction metric + * @reserved2: reserved + * @ed_horver_diag_coeff: Radial Coefficient that compensates for + * different distance for vertical/horizontal and + * diagonal gradient calculation (approx. 1/sqrt(2)) + * @reserved3: reserved + */ +struct ipu3_uapi_yuvp1_iefd_config_s { + __u32 horver_diag_coeff:7; + __u32 reserved0:1; + __u32 clamp_stitch:6; + __u32 reserved1:2; + __u32 direct_metric_update:5; + __u32 reserved2:3; + __u32 ed_horver_diag_coeff:7; + __u32 reserved3:1; +} __attribute__((packed)); + +/** + * struct ipu3_uapi_yuvp1_iefd_control - IEFd control + * + * @iefd_en: Enable IEFd + * @denoise_en: Enable denoise + * @direct_smooth_en: Enable directional smooth + * @rad_en: Enable radial update + * @vssnlm_en: Enable VSSNLM output filter + * @reserved: reserved + */ +struct ipu3_uapi_yuvp1_iefd_control { + __u32 iefd_en:1; + __u32 denoise_en:1; + __u32 direct_smooth_en:1; + __u32 rad_en:1; + __u32 vssnlm_en:1; + __u32 reserved:27; +} __attribute__((packed)); + +/** + * struct ipu3_uapi_sharp_cfg - Sharpening config + * + * @nega_lmt_txt: Sharpening limit for negative overshoots for texture. + * @reserved0: reserved + * @posi_lmt_txt: Sharpening limit for positive overshoots for texture. + * @reserved1: reserved + * @nega_lmt_dir: Sharpening limit for negative overshoots for direction (edge). + * @reserved2: reserved + * @posi_lmt_dir: Sharpening limit for positive overshoots for direction (edge). + * @reserved3: reserved + * + * Fixed point type u13.0, range [0, 8191]. + */ +struct ipu3_uapi_sharp_cfg { + __u32 nega_lmt_txt:13; + __u32 reserved0:19; + __u32 posi_lmt_txt:13; + __u32 reserved1:19; + __u32 nega_lmt_dir:13; + __u32 reserved2:19; + __u32 posi_lmt_dir:13; + __u32 reserved3:19; +} __attribute__((packed)); + +/** + * struct ipu3_uapi_far_w - Sharpening config for far sub-group + * + * @dir_shrp: Weight of wide direct sharpening, u1.6, range [0, 64], default 64. + * @reserved0: reserved + * @dir_dns: Weight of wide direct denoising, u1.6, range [0, 64], default 0. + * @reserved1: reserved + * @ndir_dns_powr: Power of non-direct denoising, + * Precision u1.6, range [0, 64], default 64. + * @reserved2: reserved + */ +struct ipu3_uapi_far_w { + __u32 dir_shrp:7; + __u32 reserved0:1; + __u32 dir_dns:7; + __u32 reserved1:1; + __u32 ndir_dns_powr:7; + __u32 reserved2:9; +} __attribute__((packed)); + +/** + * struct ipu3_uapi_unsharp_cfg - Unsharp config + * + * @unsharp_weight: Unsharp mask blending weight. + * u1.6, range [0, 64], default 16. + * 0 - disabled, 64 - use only unsharp. + * @reserved0: reserved + * @unsharp_amount: Unsharp mask amount, u4.5, range [0, 511], default 0. + * @reserved1: reserved + */ +struct ipu3_uapi_unsharp_cfg { + __u32 unsharp_weight:7; + __u32 reserved0:1; + __u32 unsharp_amount:9; + __u32 reserved1:15; +} __attribute__((packed)); + +/** + * struct ipu3_uapi_yuvp1_iefd_shrp_cfg - IEFd sharpness config + * + * @cfg: sharpness config &ipu3_uapi_sharp_cfg + * @far_w: wide range config, value as specified by &ipu3_uapi_far_w: + * The 5x5 environment is separated into 2 sub-groups, the 3x3 nearest + * neighbors (8 pixels called Near), and the second order neighborhood + * around them (16 pixels called Far). + * @unshrp_cfg: unsharpness config. 
+ *	&ipu3_uapi_unsharp_cfg
+ */
+struct ipu3_uapi_yuvp1_iefd_shrp_cfg {
+	struct ipu3_uapi_sharp_cfg cfg;
+	struct ipu3_uapi_far_w far_w;
+	struct ipu3_uapi_unsharp_cfg unshrp_cfg;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_unsharp_coef0 - Unsharp mask coefficients
+ *
+ * @c00: Coeff11, s0.8, range [-255, 255], default 1.
+ * @c01: Coeff12, s0.8, range [-255, 255], default 5.
+ * @c02: Coeff13, s0.8, range [-255, 255], default 9.
+ * @reserved: reserved
+ *
+ * Configurable registers for common sharpening support.
+ */
+struct ipu3_uapi_unsharp_coef0 {
+	__u32 c00:9;
+	__u32 c01:9;
+	__u32 c02:9;
+	__u32 reserved:5;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_unsharp_coef1 - Unsharp mask coefficients
+ *
+ * @c11: Coeff22, s0.8, range [-255, 255], default 29.
+ * @c12: Coeff23, s0.8, range [-255, 255], default 55.
+ * @c22: Coeff33, s0.8, range [-255, 255], default 96.
+ * @reserved: reserved
+ */
+struct ipu3_uapi_unsharp_coef1 {
+	__u32 c11:9;
+	__u32 c12:9;
+	__u32 c22:9;
+	__u32 reserved:5;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_yuvp1_iefd_unshrp_cfg - Unsharp mask config
+ *
+ * @unsharp_coef0: unsharp coefficient 0 config. See &ipu3_uapi_unsharp_coef0
+ * @unsharp_coef1: unsharp coefficient 1 config. See &ipu3_uapi_unsharp_coef1
+ */
+struct ipu3_uapi_yuvp1_iefd_unshrp_cfg {
+	struct ipu3_uapi_unsharp_coef0 unsharp_coef0;
+	struct ipu3_uapi_unsharp_coef1 unsharp_coef1;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_radial_reset_xy - Radial coordinate reset
+ *
+ * @x: Radial reset of x coordinate. Precision s12, [-4095, 4095], default 0.
+ * @reserved0: reserved
+ * @y: Radial reset of y coordinate. Precision s12, [-4095, 4095], default 0.
+ * @reserved1: reserved
+ */
+struct ipu3_uapi_radial_reset_xy {
+	__s32 x:13;
+	__u32 reserved0:3;
+	__s32 y:13;
+	__u32 reserved1:3;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_radial_reset_x2 - Radial X^2 reset
+ *
+ * @x2: Radial reset of x^2 coordinate. Precision u24, default 0.
+ * @reserved: reserved
+ */
+struct ipu3_uapi_radial_reset_x2 {
+	__u32 x2:24;
+	__u32 reserved:8;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_radial_reset_y2 - Radial Y^2 reset
+ *
+ * @y2: Radial reset of y^2 coordinate. Precision u24, default 0.
+ * @reserved: reserved
+ */
+struct ipu3_uapi_radial_reset_y2 {
+	__u32 y2:24;
+	__u32 reserved:8;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_radial_cfg - Radial config
+ *
+ * @rad_nf: Radial R^2 normalization factor, scaled down by 2^-(15 + scale).
+ * @reserved0: reserved
+ * @rad_inv_r2: Radial R^-2 normalized to (0.5..1).
+ *	Precision u7, range [0, 127].
+ * @reserved1: reserved
+ */
+struct ipu3_uapi_radial_cfg {
+	__u32 rad_nf:4;
+	__u32 reserved0:4;
+	__u32 rad_inv_r2:7;
+	__u32 reserved1:17;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_rad_far_w - Radial FAR sub-group
+ *
+ * @rad_dir_far_sharp_w: Weight of wide direct sharpening, u1.6, range [0, 64],
+ *	default 64.
+ * @rad_dir_far_dns_w: Weight of wide direct denoising, u1.6, range [0, 64],
+ *	default 0.
+ * @rad_ndir_far_dns_power: power of non-direct sharpening, u1.6,
+ *	range [0, 64], default 0.
+ * @reserved: reserved
+ */
+struct ipu3_uapi_rad_far_w {
+	__u32 rad_dir_far_sharp_w:8;
+	__u32 rad_dir_far_dns_w:8;
+	__u32 rad_ndir_far_dns_power:8;
+	__u32 reserved:8;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_cu_cfg0 - Radius Config Unit cfg0 register
+ *
+ * @cu6_pow: Power of CU6. Power of non-direct sharpening, u3.4.
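+ *	(u3.4: 4 fractional bits, so each count is 1/16 and e.g. a value
+ *	of 16 encodes a power of 1.0)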
+ * @reserved0: reserved + * @cu_unsharp_pow: Power of unsharp mask, u2.4. + * @reserved1: reserved + * @rad_cu6_pow: Radial/corner CU6. Directed sharpening power, u3.4. + * @reserved2: reserved + * @rad_cu_unsharp_pow: Radial power of unsharp mask, u2.4. + * @reserved3: reserved + */ +struct ipu3_uapi_cu_cfg0 { + __u32 cu6_pow:7; + __u32 reserved0:1; + __u32 cu_unsharp_pow:7; + __u32 reserved1:1; + __u32 rad_cu6_pow:7; + __u32 reserved2:1; + __u32 rad_cu_unsharp_pow:6; + __u32 reserved3:2; +} __attribute__((packed)); + +/** + * struct ipu3_uapi_cu_cfg1 - Radius Config Unit cfg1 register + * + * @rad_cu6_x1: X1 point of Config Unit 6, precision u9.0. + * @reserved0: reserved + * @rad_cu_unsharp_x1: X1 point for Config Unit unsharp for radial/corner point + * precision u9.0. + * @reserved1: reserved + */ +struct ipu3_uapi_cu_cfg1 { + __u32 rad_cu6_x1:9; + __u32 reserved0:1; + __u32 rad_cu_unsharp_x1:9; + __u32 reserved1:13; +} __attribute__((packed)); + +/** + * struct ipu3_uapi_yuvp1_iefd_rad_cfg - IEFd parameters changed radially over + * the picture plane. + * + * @reset_xy: reset xy value in radial calculation. &ipu3_uapi_radial_reset_xy + * @reset_x2: reset x square value in radial calculation. See struct + * &ipu3_uapi_radial_reset_x2 + * @reset_y2: reset y square value in radial calculation. See struct + * &ipu3_uapi_radial_reset_y2 + * @cfg: radial config defined in &ipu3_uapi_radial_cfg + * @rad_far_w: weight for wide range radial. &ipu3_uapi_rad_far_w + * @cu_cfg0: configuration unit 0. See &ipu3_uapi_cu_cfg0 + * @cu_cfg1: configuration unit 1. See &ipu3_uapi_cu_cfg1 + */ +struct ipu3_uapi_yuvp1_iefd_rad_cfg { + struct ipu3_uapi_radial_reset_xy reset_xy; + struct ipu3_uapi_radial_reset_x2 reset_x2; + struct ipu3_uapi_radial_reset_y2 reset_y2; + struct ipu3_uapi_radial_cfg cfg; + struct ipu3_uapi_rad_far_w rad_far_w; + struct ipu3_uapi_cu_cfg0 cu_cfg0; + struct ipu3_uapi_cu_cfg1 cu_cfg1; +} __attribute__((packed)); + +/* Vssnlm - Very small scale non-local mean algorithm */ + +/** + * struct ipu3_uapi_vss_lut_x - Vssnlm LUT x0/x1/x2 + * + * @vs_x0: Vssnlm LUT x0, precision u8, range [0, 255], default 16. + * @vs_x1: Vssnlm LUT x1, precision u8, range [0, 255], default 32. + * @vs_x2: Vssnlm LUT x2, precision u8, range [0, 255], default 64. + * @reserved2: reserved + */ +struct ipu3_uapi_vss_lut_x { + __u32 vs_x0:8; + __u32 vs_x1:8; + __u32 vs_x2:8; + __u32 reserved2:8; +} __attribute__((packed)); + +/** + * struct ipu3_uapi_vss_lut_y - Vssnlm LUT y0/y1/y2 + * + * @vs_y1: Vssnlm LUT y1, precision u4, range [0, 8], default 1. + * @reserved0: reserved + * @vs_y2: Vssnlm LUT y2, precision u4, range [0, 8], default 3. + * @reserved1: reserved + * @vs_y3: Vssnlm LUT y3, precision u4, range [0, 8], default 8. + * @reserved2: reserved + */ +struct ipu3_uapi_vss_lut_y { + __u32 vs_y1:4; + __u32 reserved0:4; + __u32 vs_y2:4; + __u32 reserved1:4; + __u32 vs_y3:4; + __u32 reserved2:12; +} __attribute__((packed)); + +/** + * struct ipu3_uapi_yuvp1_iefd_vssnlm_cfg - IEFd Vssnlm Lookup table + * + * @vss_lut_x: vss lookup table. See &ipu3_uapi_vss_lut_x description + * @vss_lut_y: vss lookup table. 
See &ipu3_uapi_vss_lut_y description + */ +struct ipu3_uapi_yuvp1_iefd_vssnlm_cfg { + struct ipu3_uapi_vss_lut_x vss_lut_x; + struct ipu3_uapi_vss_lut_y vss_lut_y; +} __attribute__((packed)); + +/** + * struct ipu3_uapi_yuvp1_iefd_config - IEFd config + * + * @units: configuration unit setting, &ipu3_uapi_yuvp1_iefd_cfg_units + * @config: configuration, as defined by &ipu3_uapi_yuvp1_iefd_config_s + * @control: control setting, as defined by &ipu3_uapi_yuvp1_iefd_control + * @sharp: sharpness setting, as defined by &ipu3_uapi_yuvp1_iefd_shrp_cfg + * @unsharp: unsharpness setting, as defined by &ipu3_uapi_yuvp1_iefd_unshrp_cfg + * @rad: radial setting, as defined by &ipu3_uapi_yuvp1_iefd_rad_cfg + * @vsslnm: vsslnm setting, as defined by &ipu3_uapi_yuvp1_iefd_vssnlm_cfg + */ +struct ipu3_uapi_yuvp1_iefd_config { + struct ipu3_uapi_yuvp1_iefd_cfg_units units; + struct ipu3_uapi_yuvp1_iefd_config_s config; + struct ipu3_uapi_yuvp1_iefd_control control; + struct ipu3_uapi_yuvp1_iefd_shrp_cfg sharp; + struct ipu3_uapi_yuvp1_iefd_unshrp_cfg unsharp; + struct ipu3_uapi_yuvp1_iefd_rad_cfg rad; + struct ipu3_uapi_yuvp1_iefd_vssnlm_cfg vsslnm; +} __attribute__((packed)); + +/** + * struct ipu3_uapi_yuvp1_yds_config - Y Down-Sampling config + * + * @c00: range [0, 3], default 0x0 + * @c01: range [0, 3], default 0x1 + * @c02: range [0, 3], default 0x1 + * @c03: range [0, 3], default 0x0 + * @c10: range [0, 3], default 0x0 + * @c11: range [0, 3], default 0x1 + * @c12: range [0, 3], default 0x1 + * @c13: range [0, 3], default 0x0 + * + * Above are 4x2 filter coefficients for chroma output downscaling. + * + * @norm_factor: Normalization factor, range [0, 4], default 2 + * 0 - divide by 1 + * 1 - divide by 2 + * 2 - divide by 4 + * 3 - divide by 8 + * 4 - divide by 16 + * @reserved0: reserved + * @bin_output: Down sampling on Luma channel in two optional modes + * 0 - Bin output 4.2.0 (default), 1 output 4.2.2. + * @reserved1: reserved + */ +struct ipu3_uapi_yuvp1_yds_config { + __u32 c00:2; + __u32 c01:2; + __u32 c02:2; + __u32 c03:2; + __u32 c10:2; + __u32 c11:2; + __u32 c12:2; + __u32 c13:2; + __u32 norm_factor:5; + __u32 reserved0:4; + __u32 bin_output:1; + __u32 reserved1:6; +} __attribute__((packed)); + +/* Chroma Noise Reduction */ + +/** + * struct ipu3_uapi_yuvp1_chnr_enable_config - Chroma noise reduction enable + * + * @enable: enable/disable chroma noise reduction + * @yuv_mode: 0 - YUV420, 1 - YUV422 + * @reserved0: reserved + * @col_size: number of columns in the frame, max width is 2560 + * @reserved1: reserved + */ +struct ipu3_uapi_yuvp1_chnr_enable_config { + __u32 enable:1; + __u32 yuv_mode:1; + __u32 reserved0:14; + __u32 col_size:12; + __u32 reserved1:4; +} __attribute__((packed)); + +/** + * struct ipu3_uapi_yuvp1_chnr_coring_config - Coring thresholds for UV + * + * @u: U coring level, u0.13, range [0.0, 1.0], default 0.0 + * @reserved0: reserved + * @v: V coring level, u0.13, range [0.0, 1.0], default 0.0 + * @reserved1: reserved + */ +struct ipu3_uapi_yuvp1_chnr_coring_config { + __u32 u:13; + __u32 reserved0:3; + __u32 v:13; + __u32 reserved1:3; +} __attribute__((packed)); + +/** + * struct ipu3_uapi_yuvp1_chnr_sense_gain_config - Chroma noise reduction gains + * + * All sensitivity gain parameters have precision u13.0, range [0, 8191]. 
+ *
+ * @vy: Sensitivity of horizontal edge of Y, default 100
+ * @vu: Sensitivity of horizontal edge of U, default 100
+ * @vv: Sensitivity of horizontal edge of V, default 100
+ * @reserved0: reserved
+ * @hy: Sensitivity of vertical edge of Y, default 50
+ * @hu: Sensitivity of vertical edge of U, default 50
+ * @hv: Sensitivity of vertical edge of V, default 50
+ * @reserved1: reserved
+ */
+struct ipu3_uapi_yuvp1_chnr_sense_gain_config {
+	__u32 vy:8;
+	__u32 vu:8;
+	__u32 vv:8;
+	__u32 reserved0:8;
+
+	__u32 hy:8;
+	__u32 hu:8;
+	__u32 hv:8;
+	__u32 reserved1:8;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_yuvp1_chnr_iir_fir_config - Chroma IIR/FIR filter config
+ *
+ * @fir_0h: Value of center tap in horizontal FIR, range [0, 32], default 8.
+ * @reserved0: reserved
+ * @fir_1h: Value of distance 1 in horizontal FIR, range [0, 32], default 12.
+ * @reserved1: reserved
+ * @fir_2h: Value of distance 2 tap in horizontal FIR, range [0, 32], default 0.
+ * @dalpha_clip_val: weight for previous row in IIR, range [1, 256], default 0.
+ * @reserved2: reserved
+ */
+struct ipu3_uapi_yuvp1_chnr_iir_fir_config {
+	__u32 fir_0h:6;
+	__u32 reserved0:2;
+	__u32 fir_1h:6;
+	__u32 reserved1:2;
+	__u32 fir_2h:6;
+	__u32 dalpha_clip_val:9;
+	__u32 reserved2:1;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_yuvp1_chnr_config - Chroma noise reduction config
+ *
+ * @enable: chroma noise reduction enable, see
+ *	&ipu3_uapi_yuvp1_chnr_enable_config
+ * @coring: coring config for chroma noise reduction, see
+ *	&ipu3_uapi_yuvp1_chnr_coring_config
+ * @sense_gain: sensitivity config for chroma noise reduction, see
+ *	&ipu3_uapi_yuvp1_chnr_sense_gain_config
+ * @iir_fir: iir and fir config for chroma noise reduction, see
+ *	&ipu3_uapi_yuvp1_chnr_iir_fir_config
+ */
+struct ipu3_uapi_yuvp1_chnr_config {
+	struct ipu3_uapi_yuvp1_chnr_enable_config enable;
+	struct ipu3_uapi_yuvp1_chnr_coring_config coring;
+	struct ipu3_uapi_yuvp1_chnr_sense_gain_config sense_gain;
+	struct ipu3_uapi_yuvp1_chnr_iir_fir_config iir_fir;
+} __attribute__((packed));
+
+/* Edge Enhancement and Noise Reduction */
+
+/**
+ * struct ipu3_uapi_yuvp1_y_ee_nr_lpf_config - Luma(Y) edge enhancement
+ *				low-pass filter coefficients
+ *
+ * @a_diag: Smoothing diagonal coefficient, u5.0.
+ * @reserved0: reserved
+ * @a_periph: Image smoothing peripheral coefficient, u5.0.
+ * @reserved1: reserved
+ * @a_cent: Image smoothing center coefficient, u5.0.
+ * @reserved2: reserved
+ * @enable: 0: Y_EE_NR disabled, output = input; 1: Y_EE_NR enabled.
+ */
+struct ipu3_uapi_yuvp1_y_ee_nr_lpf_config {
+	__u32 a_diag:5;
+	__u32 reserved0:3;
+	__u32 a_periph:5;
+	__u32 reserved1:3;
+	__u32 a_cent:5;
+	__u32 reserved2:9;
+	__u32 enable:1;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_yuvp1_y_ee_nr_sense_config - Luma(Y) edge enhancement
+ *				noise reduction sensitivity gains
+ *
+ * @edge_sense_0: Sensitivity of edge in dark area. u13.0, default 8191.
+ * @reserved0: reserved
+ * @delta_edge_sense: Difference in the sensitivity of edges between
+ *	the bright and dark areas. u13.0, default 0.
+ * @reserved1: reserved
+ * @corner_sense_0: Sensitivity of corner in dark area. u13.0, default 0.
+ * @reserved2: reserved
+ * @delta_corner_sense: Difference in the sensitivity of corners between
+ *	the bright and dark areas. u13.0, default 8191.
+ * @reserved3: reserved + */ +struct ipu3_uapi_yuvp1_y_ee_nr_sense_config { + __u32 edge_sense_0:13; + __u32 reserved0:3; + __u32 delta_edge_sense:13; + __u32 reserved1:3; + __u32 corner_sense_0:13; + __u32 reserved2:3; + __u32 delta_corner_sense:13; + __u32 reserved3:3; +} __attribute__((packed)); + +/** + * struct ipu3_uapi_yuvp1_y_ee_nr_gain_config - Luma(Y) edge enhancement + * noise reduction gain config + * + * @gain_pos_0: Gain for positive edge in dark area. u5.0, [0, 16], default 2. + * @reserved0: reserved + * @delta_gain_posi: Difference in the gain of edges between the bright and + * dark areas for positive edges. u5.0, [0, 16], default 0. + * @reserved1: reserved + * @gain_neg_0: Gain for negative edge in dark area. u5.0, [0, 16], default 8. + * @reserved2: reserved + * @delta_gain_neg: Difference in the gain of edges between the bright and + * dark areas for negative edges. u5.0, [0, 16], default 0. + * @reserved3: reserved + */ +struct ipu3_uapi_yuvp1_y_ee_nr_gain_config { + __u32 gain_pos_0:5; + __u32 reserved0:3; + __u32 delta_gain_posi:5; + __u32 reserved1:3; + __u32 gain_neg_0:5; + __u32 reserved2:3; + __u32 delta_gain_neg:5; + __u32 reserved3:3; +} __attribute__((packed)); + +/** + * struct ipu3_uapi_yuvp1_y_ee_nr_clip_config - Luma(Y) edge enhancement + * noise reduction clipping config + * + * @clip_pos_0: Limit of positive edge in dark area + * u5, value [0, 16], default 8. + * @reserved0: reserved + * @delta_clip_posi: Difference in the limit of edges between the bright + * and dark areas for positive edges. + * u5, value [0, 16], default 8. + * @reserved1: reserved + * @clip_neg_0: Limit of negative edge in dark area + * u5, value [0, 16], default 8. + * @reserved2: reserved + * @delta_clip_neg: Difference in the limit of edges between the bright + * and dark areas for negative edges. + * u5, value [0, 16], default 8. + * @reserved3: reserved + */ +struct ipu3_uapi_yuvp1_y_ee_nr_clip_config { + __u32 clip_pos_0:5; + __u32 reserved0:3; + __u32 delta_clip_posi:5; + __u32 reserved1:3; + __u32 clip_neg_0:5; + __u32 reserved2:3; + __u32 delta_clip_neg:5; + __u32 reserved3:3; +} __attribute__((packed)); + +/** + * struct ipu3_uapi_yuvp1_y_ee_nr_frng_config - Luma(Y) edge enhancement + * noise reduction fringe config + * + * @gain_exp: Common exponent of gains, u4, [0, 8], default 2. + * @reserved0: reserved + * @min_edge: Threshold for edge and smooth stitching, u13. + * @reserved1: reserved + * @lin_seg_param: Power of LinSeg, u4. + * @reserved2: reserved + * @t1: Parameter for enabling/disabling the edge enhancement, u1.0, [0, 1], + * default 1. + * @t2: Parameter for enabling/disabling the smoothing, u1.0, [0, 1], + * default 1. + * @reserved3: reserved + */ +struct ipu3_uapi_yuvp1_y_ee_nr_frng_config { + __u32 gain_exp:4; + __u32 reserved0:28; + __u32 min_edge:13; + __u32 reserved1:3; + __u32 lin_seg_param:4; + __u32 reserved2:4; + __u32 t1:1; + __u32 t2:1; + __u32 reserved3:6; +} __attribute__((packed)); + +/** + * struct ipu3_uapi_yuvp1_y_ee_nr_diag_config - Luma(Y) edge enhancement + * noise reduction diagonal config + * + * @diag_disc_g: Coefficient that prioritize diagonal edge direction on + * horizontal or vertical for final enhancement. + * u4.0, [1, 15], default 1. + * @reserved0: reserved + * @hvw_hor: Weight of horizontal/vertical edge enhancement for hv edge. + * u2.2, [1, 15], default 4. + * @dw_hor: Weight of diagonal edge enhancement for hv edge. + * u2.2, [1, 15], default 1. 
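+ *	(in u2.2 each count is 0.25, so the default 4 of @hvw_hor encodes
+ *	1.0 and the default 1 here encodes 0.25)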
+ * @hvw_diag: Weight of horizontal/vertical edge enhancement for diagonal edge.
+ *	u2.2, [1, 15], default 1.
+ * @dw_diag: Weight of diagonal edge enhancement for diagonal edge.
+ *	u2.2, [1, 15], default 4.
+ * @reserved1: reserved
+ */
+struct ipu3_uapi_yuvp1_y_ee_nr_diag_config {
+	__u32 diag_disc_g:4;
+	__u32 reserved0:4;
+	__u32 hvw_hor:4;
+	__u32 dw_hor:4;
+	__u32 hvw_diag:4;
+	__u32 dw_diag:4;
+	__u32 reserved1:8;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_yuvp1_y_ee_nr_fc_coring_config - Luma(Y) edge enhancement
+ *		noise reduction false color correction (FCC) coring config
+ *
+ * @pos_0: Gain for positive edge in dark, u13.0, [0, 16], default 0.
+ * @reserved0: reserved
+ * @pos_delta: Gain for positive edge in bright, value: pos_0 + pos_delta <= 16,
+ *	u13.0, default 0.
+ * @reserved1: reserved
+ * @neg_0: Gain for negative edge in dark area, u13.0, range [0, 16], default 0.
+ * @reserved2: reserved
+ * @neg_delta: Gain for negative edge in bright area. neg_0 + neg_delta <= 16,
+ *	u13.0, default 0.
+ * @reserved3: reserved
+ *
+ * Coring is a simple soft thresholding technique.
+ */
+struct ipu3_uapi_yuvp1_y_ee_nr_fc_coring_config {
+	__u32 pos_0:13;
+	__u32 reserved0:3;
+	__u32 pos_delta:13;
+	__u32 reserved1:3;
+	__u32 neg_0:13;
+	__u32 reserved2:3;
+	__u32 neg_delta:13;
+	__u32 reserved3:3;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_yuvp1_y_ee_nr_config - Edge enhancement and noise reduction
+ *
+ * @lpf: low-pass filter config. See &ipu3_uapi_yuvp1_y_ee_nr_lpf_config
+ * @sense: sensitivity config. See &ipu3_uapi_yuvp1_y_ee_nr_sense_config
+ * @gain: gain config as defined in &ipu3_uapi_yuvp1_y_ee_nr_gain_config
+ * @clip: clip config as defined in &ipu3_uapi_yuvp1_y_ee_nr_clip_config
+ * @frng: fringe config as defined in &ipu3_uapi_yuvp1_y_ee_nr_frng_config
+ * @diag: diagonal edge config. See &ipu3_uapi_yuvp1_y_ee_nr_diag_config
+ * @fc_coring: coring config for fringe control. See
+ *	&ipu3_uapi_yuvp1_y_ee_nr_fc_coring_config
+ */
+struct ipu3_uapi_yuvp1_y_ee_nr_config {
+	struct ipu3_uapi_yuvp1_y_ee_nr_lpf_config lpf;
+	struct ipu3_uapi_yuvp1_y_ee_nr_sense_config sense;
+	struct ipu3_uapi_yuvp1_y_ee_nr_gain_config gain;
+	struct ipu3_uapi_yuvp1_y_ee_nr_clip_config clip;
+	struct ipu3_uapi_yuvp1_y_ee_nr_frng_config frng;
+	struct ipu3_uapi_yuvp1_y_ee_nr_diag_config diag;
+	struct ipu3_uapi_yuvp1_y_ee_nr_fc_coring_config fc_coring;
+} __attribute__((packed));
+
+/* Total Color Correction */
+
+/**
+ * struct ipu3_uapi_yuvp2_tcc_gen_control_static_config - Total color
+ *				correction general control config
+ *
+ * @en: 0 - TCC disabled, output = input. 1 - TCC enabled.
+ * @blend_shift: blend shift, range [3, 4], default NA.
+ * @gain_according_to_y_only: 0: Gain is calculated according to YUV,
+ *	1: Gain is calculated according to Y only
+ * @reserved0: reserved
+ * @gamma: Final blending coefficients. Values [-16, 16], default NA.
+ * @reserved1: reserved
+ * @delta: Final blending coefficients. Values [-16, 16], default NA.
+ * @reserved2: reserved
+ */
+struct ipu3_uapi_yuvp2_tcc_gen_control_static_config {
+	__u32 en:1;
+	__u32 blend_shift:3;
+	__u32 gain_according_to_y_only:1;
+	__u32 reserved0:11;
+	__s32 gamma:5;
+	__u32 reserved1:3;
+	__s32 delta:5;
+	__u32 reserved2:3;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_yuvp2_tcc_macc_elem_static_config - Total color correction
+ *				multi-axis color control (MACC) config
+ *
+ * @a: a coefficient for 2x2 MACC conversion matrix.
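+ *	(a signed 12-bit field, so the usable range is [-2048, 2047]; the
+ *	same applies to @b, @c and @d)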
+ * @reserved0: reserved
+ * @b: b coefficient for 2x2 MACC conversion matrix.
+ * @reserved1: reserved
+ * @c: c coefficient for 2x2 MACC conversion matrix.
+ * @reserved2: reserved
+ * @d: d coefficient for 2x2 MACC conversion matrix.
+ * @reserved3: reserved
+ */
+struct ipu3_uapi_yuvp2_tcc_macc_elem_static_config {
+	__s32 a:12;
+	__u32 reserved0:4;
+	__s32 b:12;
+	__u32 reserved1:4;
+	__s32 c:12;
+	__u32 reserved2:4;
+	__s32 d:12;
+	__u32 reserved3:4;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_yuvp2_tcc_macc_table_static_config - Total color correction
+ *				multi-axis color control (MACC) table array
+ *
+ * @entries: config for multi axis color correction, as specified by
+ *	&ipu3_uapi_yuvp2_tcc_macc_elem_static_config
+ */
+struct ipu3_uapi_yuvp2_tcc_macc_table_static_config {
+	struct ipu3_uapi_yuvp2_tcc_macc_elem_static_config
+		entries[IPU3_UAPI_YUVP2_TCC_MACC_TABLE_ELEMENTS];
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_yuvp2_tcc_inv_y_lut_static_config - Total color correction
+ *				inverse y lookup table
+ *
+ * @entries: lookup table for inverse y estimation, used to estimate the
+ *	ratio between luma and chroma: chroma is approximated by the
+ *	absolute value of the radius on the chroma plane
+ *	(R = sqrt(u^2 + v^2)) and luma by 1/Y.
+ */
+struct ipu3_uapi_yuvp2_tcc_inv_y_lut_static_config {
+	__u16 entries[IPU3_UAPI_YUVP2_TCC_INV_Y_LUT_ELEMENTS];
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_yuvp2_tcc_gain_pcwl_lut_static_config - Total color
+ *				correction lookup table for PCWL
+ *
+ * @entries: lookup table for gain piecewise linear transformation (PCWL)
+ */
+struct ipu3_uapi_yuvp2_tcc_gain_pcwl_lut_static_config {
+	__u16 entries[IPU3_UAPI_YUVP2_TCC_GAIN_PCWL_LUT_ELEMENTS];
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_yuvp2_tcc_r_sqr_lut_static_config - Total color correction
+ *				lookup table for r square root
+ *
+ * @entries: lookup table for r square root estimation
+ */
+struct ipu3_uapi_yuvp2_tcc_r_sqr_lut_static_config {
+	__s16 entries[IPU3_UAPI_YUVP2_TCC_R_SQR_LUT_ELEMENTS];
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_yuvp2_tcc_static_config - Total color correction static
+ *
+ * @gen_control: general config for Total Color Correction
+ * @macc_table: config for multi axis color correction
+ * @inv_y_lut: lookup table for inverse y estimation
+ * @gain_pcwl: lookup table for gain PCWL
+ * @r_sqr_lut: lookup table for r square root estimation.
+ */
+struct ipu3_uapi_yuvp2_tcc_static_config {
+	struct ipu3_uapi_yuvp2_tcc_gen_control_static_config gen_control;
+	struct ipu3_uapi_yuvp2_tcc_macc_table_static_config macc_table;
+	struct ipu3_uapi_yuvp2_tcc_inv_y_lut_static_config inv_y_lut;
+	struct ipu3_uapi_yuvp2_tcc_gain_pcwl_lut_static_config gain_pcwl;
+	struct ipu3_uapi_yuvp2_tcc_r_sqr_lut_static_config r_sqr_lut;
+} __attribute__((packed));
+
+/* Advanced Noise Reduction related structs */
+
+/*
+ * struct ipu3_uapi_anr_alpha - Advanced noise reduction alpha
+ *
+ * Tunable parameters that are subject to modification according to the
+ * total gain used.
+ */
+struct ipu3_uapi_anr_alpha {
+	__u16 gr;
+	__u16 r;
+	__u16 b;
+	__u16 gb;
+	__u16 dc_gr;
+	__u16 dc_r;
+	__u16 dc_b;
+	__u16 dc_gb;
+} __attribute__((packed));
+
+/*
+ * struct ipu3_uapi_anr_beta - Advanced noise reduction beta
+ *
+ * Tunable parameters that are subject to modification according to the
+ * total gain used.
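+ *
+ * The default beta triplets are listed under @beta in
+ * &ipu3_uapi_anr_transform_config.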
+ */
+struct ipu3_uapi_anr_beta {
+	__u16 beta_gr;
+	__u16 beta_r;
+	__u16 beta_b;
+	__u16 beta_gb;
+} __attribute__((packed));
+
+/*
+ * struct ipu3_uapi_anr_plane_color - Advanced noise reduction per plane R, Gr,
+ *				Gb and B register settings
+ *
+ * Tunable parameters that are subject to modification according to the
+ * total gain used.
+ */
+struct ipu3_uapi_anr_plane_color {
+	__u16 reg_w_gr[16];
+	__u16 reg_w_r[16];
+	__u16 reg_w_b[16];
+	__u16 reg_w_gb[16];
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_anr_transform_config - Advanced noise reduction transform
+ *
+ * @enable: advanced noise reduction enabled.
+ * @adaptive_treshhold_en: On IPU3, adaptive threshold is always enabled.
+ * @reserved1: reserved
+ * @reserved2: reserved
+ * @alpha: use the following defaults:
+ *	13, 13, 13, 13, 0, 0, 0, 0
+ *	11, 11, 11, 11, 0, 0, 0, 0
+ *	14, 14, 14, 14, 0, 0, 0, 0
+ * @beta: use the following defaults:
+ *	24, 24, 24, 24
+ *	21, 20, 20, 21
+ *	25, 25, 25, 25
+ * @color: use defaults defined in driver/media/pci/intel/ipu3-tables.c
+ * @sqrt_lut: 11 bits per element, values =
+ *	[724 768 810 849 887
+ *	923 958 991 1024 1056
+ *	1086 1116 1145 1173 1201
+ *	1228 1254 1280 1305 1330
+ *	1355 1379 1402 1425 1448]
+ * @xreset: Reset value of X for r^2 calculation. Value: col_start - X_center.
+ *	Constraint: Xreset + FrameWidth = 4095, Xreset = -4095, default -1632.
+ * @reserved3: reserved
+ * @yreset: Reset value of Y for r^2 calculation. Value: row_start - Y_center.
+ *	Constraint: Yreset + FrameHeight = 4095, Yreset = -4095, default -1224.
+ * @reserved4: reserved
+ * @x_sqr_reset: Reset value of X^2 for r^2 calculation. Value = (Xreset)^2
+ * @r_normfactor: Normalization factor for R. Default 14.
+ * @reserved5: reserved
+ * @y_sqr_reset: Reset value of Y^2 for r^2 calculation. Value = (Yreset)^2
+ * @gain_scale: Parameter describing shading gain as a function of distance
+ *	from the image center.
+ *	A single value per frame, loaded by the driver. Default 115.
+ */
+struct ipu3_uapi_anr_transform_config {
+	__u32 enable:1;			/* 0 or 1, disabled or enabled */
+	__u32 adaptive_treshhold_en:1;	/* On IPU3, always enabled */
+
+	__u32 reserved1:30;
+	__u8 reserved2[44];
+
+	struct ipu3_uapi_anr_alpha alpha[3];
+	struct ipu3_uapi_anr_beta beta[3];
+	struct ipu3_uapi_anr_plane_color color[3];
+
+	__u16 sqrt_lut[IPU3_UAPI_ANR_LUT_SIZE];	/* 11 bits per element */
+
+	__s16 xreset:13;
+	__u16 reserved3:3;
+	__s16 yreset:13;
+	__u16 reserved4:3;
+
+	__u32 x_sqr_reset:24;
+	__u32 r_normfactor:5;
+	__u32 reserved5:3;
+
+	__u32 y_sqr_reset:24;
+	__u32 gain_scale:8;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_anr_stitch_pyramid - ANR stitch pyramid
+ *
+ * @entry0: pyramid LUT entry0, range [0x0, 0x3f]
+ * @entry1: pyramid LUT entry1, range [0x0, 0x3f]
+ * @entry2: pyramid LUT entry2, range [0x0, 0x3f]
+ * @reserved: reserved
+ */
+struct ipu3_uapi_anr_stitch_pyramid {
+	__u32 entry0:6;
+	__u32 entry1:6;
+	__u32 entry2:6;
+	__u32 reserved:14;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_anr_stitch_config - ANR stitch config
+ *
+ * @anr_stitch_en: enable stitch. Enabled with 1.
+ * @reserved: reserved
+ * @pyramid: pyramid table as defined by &ipu3_uapi_anr_stitch_pyramid,
+ *	default values:
+ *	{ 1, 3, 5 }, { 7, 7, 5 }, { 3, 1, 3 },
+ *	{ 9, 15, 21 }, { 21, 15, 9 }, { 3, 5, 15 },
+ *	{ 25, 35, 35 }, { 25, 15, 5 }, { 7, 21, 35 },
+ *	{ 49, 49, 35 }, { 21, 7, 7 }, { 21, 35, 49 },
+ *	{ 49, 35, 21 }, { 7, 5, 15 }, { 25, 35, 35 },
+ *	{ 25, 15, 5 }, { 3, 9, 15 }, { 21, 21, 15 },
+ *	{ 9, 3, 1 }, { 3, 5, 7 }, { 7, 5, 3 }, { 1 }
+ */
+struct ipu3_uapi_anr_stitch_config {
+	__u32 anr_stitch_en;
+	__u8 reserved[44];
+	struct ipu3_uapi_anr_stitch_pyramid pyramid[IPU3_UAPI_ANR_PYRAMID_SIZE];
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_anr_config - ANR config
+ *
+ * @transform: advanced noise reduction transform config as specified by
+ *	&ipu3_uapi_anr_transform_config
+ * @stitch: create 4x4 patch from 4 surrounding 8x8 patches.
+ */
+struct ipu3_uapi_anr_config {
+	struct ipu3_uapi_anr_transform_config transform __attribute__((aligned(32)));
+	struct ipu3_uapi_anr_stitch_config stitch __attribute__((aligned(32)));
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_acc_param - Accelerator cluster parameters
+ *
+ * ACC refers to the HW cluster containing all Fixed Functions (FFs). Each FF
+ * implements a specific algorithm.
+ *
+ * @bnr: parameters for bayer noise reduction static config. See
+ *	&ipu3_uapi_bnr_static_config
+ * @green_disparity: disparity static config between gr and gb channel.
+ *	See &ipu3_uapi_bnr_static_config_green_disparity
+ * @dm: de-mosaic config. See &ipu3_uapi_dm_config
+ * @ccm: color correction matrix. See &ipu3_uapi_ccm_mat_config
+ * @gamma: gamma correction config. See &ipu3_uapi_gamma_config
+ * @csc: color space conversion matrix. See &ipu3_uapi_csc_mat_config
+ * @cds: color down sample config. See &ipu3_uapi_cds_params
+ * @shd: lens shading correction config. See &ipu3_uapi_shd_config
+ * @iefd: Image enhancement filter and denoise config.
+ *	&ipu3_uapi_yuvp1_iefd_config
+ * @yds_c0: y down scaler config. &ipu3_uapi_yuvp1_yds_config
+ * @chnr_c0: chroma noise reduction config. &ipu3_uapi_yuvp1_chnr_config
+ * @y_ee_nr: y edge enhancement and noise reduction config.
+ *	&ipu3_uapi_yuvp1_y_ee_nr_config
+ * @yds: y down scaler config. See &ipu3_uapi_yuvp1_yds_config
+ * @chnr: chroma noise reduction config. See &ipu3_uapi_yuvp1_chnr_config
+ * @yds2: y channel down scaler config. See &ipu3_uapi_yuvp1_yds_config
+ * @tcc: total color correction config as defined in struct
+ *	&ipu3_uapi_yuvp2_tcc_static_config
+ * @anr: advanced noise reduction config. See &ipu3_uapi_anr_config
+ * @awb_fr: AWB filter response config. See &ipu3_uapi_awb_fr_config
+ * @ae: auto exposure config. As specified by &ipu3_uapi_ae_config
+ * @af: auto focus config. As specified by &ipu3_uapi_af_config
+ * @awb: auto white balance config. As specified by
+ *	&ipu3_uapi_awb_config
+ */
+struct ipu3_uapi_acc_param {
+	struct ipu3_uapi_bnr_static_config bnr;
+	struct ipu3_uapi_bnr_static_config_green_disparity
+		green_disparity __attribute__((aligned(32)));
+	struct ipu3_uapi_dm_config dm __attribute__((aligned(32)));
+	struct ipu3_uapi_ccm_mat_config ccm __attribute__((aligned(32)));
+	struct ipu3_uapi_gamma_config gamma __attribute__((aligned(32)));
+	struct ipu3_uapi_csc_mat_config csc __attribute__((aligned(32)));
+	struct ipu3_uapi_cds_params cds __attribute__((aligned(32)));
+	struct ipu3_uapi_shd_config shd __attribute__((aligned(32)));
+	struct ipu3_uapi_yuvp1_iefd_config iefd __attribute__((aligned(32)));
+	struct ipu3_uapi_yuvp1_yds_config yds_c0 __attribute__((aligned(32)));
+	struct ipu3_uapi_yuvp1_chnr_config chnr_c0 __attribute__((aligned(32)));
+	struct ipu3_uapi_yuvp1_y_ee_nr_config y_ee_nr __attribute__((aligned(32)));
+	struct ipu3_uapi_yuvp1_yds_config yds __attribute__((aligned(32)));
+	struct ipu3_uapi_yuvp1_chnr_config chnr __attribute__((aligned(32)));
+	struct ipu3_uapi_yuvp1_yds_config yds2 __attribute__((aligned(32)));
+	struct ipu3_uapi_yuvp2_tcc_static_config tcc __attribute__((aligned(32)));
+	struct ipu3_uapi_anr_config anr;
+	struct ipu3_uapi_awb_fr_config_s awb_fr;
+	struct ipu3_uapi_ae_config ae;
+	struct ipu3_uapi_af_config_s af;
+	struct ipu3_uapi_awb_config awb;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_isp_lin_vmem_params - Linearization parameters
+ *
+ * @lin_lutlow_gr: linearization look-up table for GR channel interpolation.
+ * @lin_lutlow_r: linearization look-up table for R channel interpolation.
+ * @lin_lutlow_b: linearization look-up table for B channel interpolation.
+ * @lin_lutlow_gb: linearization look-up table for GB channel interpolation.
+ *	lin_lutlow_gr / lin_lutlow_r / lin_lutlow_b /
+ *	lin_lutlow_gb <= LIN_MAX_VALUE - 1.
+ * @lin_lutdif_gr: lin_lutlow_gr[i+1] - lin_lutlow_gr[i].
+ * @lin_lutdif_r: lin_lutlow_r[i+1] - lin_lutlow_r[i].
+ * @lin_lutdif_b: lin_lutlow_b[i+1] - lin_lutlow_b[i].
+ * @lin_lutdif_gb: lin_lutlow_gb[i+1] - lin_lutlow_gb[i].
+ */
+struct ipu3_uapi_isp_lin_vmem_params {
+	__s16 lin_lutlow_gr[IPU3_UAPI_LIN_LUT_SIZE];
+	__s16 lin_lutlow_r[IPU3_UAPI_LIN_LUT_SIZE];
+	__s16 lin_lutlow_b[IPU3_UAPI_LIN_LUT_SIZE];
+	__s16 lin_lutlow_gb[IPU3_UAPI_LIN_LUT_SIZE];
+	__s16 lin_lutdif_gr[IPU3_UAPI_LIN_LUT_SIZE];
+	__s16 lin_lutdif_r[IPU3_UAPI_LIN_LUT_SIZE];
+	__s16 lin_lutdif_b[IPU3_UAPI_LIN_LUT_SIZE];
+	__s16 lin_lutdif_gb[IPU3_UAPI_LIN_LUT_SIZE];
+} __attribute__((packed));
+
+/* Temporal Noise Reduction */
+
+/**
+ * struct ipu3_uapi_isp_tnr3_vmem_params - Temporal noise reduction vector
+ *				memory parameters
+ *
+ * @slope: slope setting in interpolation curve for temporal noise reduction.
+ * @reserved1: reserved
+ * @sigma: knee point setting in interpolation curve for temporal
+ *	noise reduction.
+ * @reserved2: reserved
+ */
+struct ipu3_uapi_isp_tnr3_vmem_params {
+	__u16 slope[IPU3_UAPI_ISP_TNR3_VMEM_LEN];
+	__u16 reserved1[IPU3_UAPI_ISP_VEC_ELEMS
+		- IPU3_UAPI_ISP_TNR3_VMEM_LEN];
+	__u16 sigma[IPU3_UAPI_ISP_TNR3_VMEM_LEN];
+	__u16 reserved2[IPU3_UAPI_ISP_VEC_ELEMS
+		- IPU3_UAPI_ISP_TNR3_VMEM_LEN];
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_isp_tnr3_params - Temporal noise reduction v3 parameters
+ *
+ * @knee_y1: Knee point. TNR3 assumes the standard deviations of Y, U and
+ *	V at Y1 are TnrY1_Sigma_Y, U and V.
+ * @knee_y2: Knee point. TNR3 assumes the standard deviations of Y, U and
+ *	V at Y2 are TnrY2_Sigma_Y, U and V.
+ * @maxfb_y: Max feedback gain for Y
+ * @maxfb_u: Max feedback gain for U
+ * @maxfb_v: Max feedback gain for V
+ * @round_adj_y: rounding Adjust for Y
+ * @round_adj_u: rounding Adjust for U
+ * @round_adj_v: rounding Adjust for V
+ * @ref_buf_select: selection of the reference frame buffer to be used.
+ */
+struct ipu3_uapi_isp_tnr3_params {
+	__u32 knee_y1;
+	__u32 knee_y2;
+	__u32 maxfb_y;
+	__u32 maxfb_u;
+	__u32 maxfb_v;
+	__u32 round_adj_y;
+	__u32 round_adj_u;
+	__u32 round_adj_v;
+	__u32 ref_buf_select;
+} __attribute__((packed));
+
+/* Extreme Noise Reduction version 3 */
+
+/**
+ * struct ipu3_uapi_isp_xnr3_vmem_params - Extreme noise reduction v3
+ *	vector memory parameters
+ *
+ * @x: xnr3 parameters.
+ * @a: xnr3 parameters.
+ * @b: xnr3 parameters.
+ * @c: xnr3 parameters.
+ */
+struct ipu3_uapi_isp_xnr3_vmem_params {
+	__u16 x[IPU3_UAPI_ISP_VEC_ELEMS];
+	__u16 a[IPU3_UAPI_ISP_VEC_ELEMS];
+	__u16 b[IPU3_UAPI_ISP_VEC_ELEMS];
+	__u16 c[IPU3_UAPI_ISP_VEC_ELEMS];
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_xnr3_alpha_params - Extreme noise reduction v3
+ *	alpha tuning parameters
+ *
+ * @y0: Sigma for Y range similarity in dark area.
+ * @u0: Sigma for U range similarity in dark area.
+ * @v0: Sigma for V range similarity in dark area.
+ * @ydiff: Sigma difference for Y between bright area and dark area.
+ * @udiff: Sigma difference for U between bright area and dark area.
+ * @vdiff: Sigma difference for V between bright area and dark area.
+ */
+struct ipu3_uapi_xnr3_alpha_params {
+	__u32 y0;
+	__u32 u0;
+	__u32 v0;
+	__u32 ydiff;
+	__u32 udiff;
+	__u32 vdiff;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_xnr3_coring_params - Extreme noise reduction v3
+ *	coring parameters
+ *
+ * @u0: Coring Threshold of U channel in dark area.
+ * @v0: Coring Threshold of V channel in dark area.
+ * @udiff: Threshold difference of U channel between bright and dark area.
+ * @vdiff: Threshold difference of V channel between bright and dark area.
+ */
+struct ipu3_uapi_xnr3_coring_params {
+	__u32 u0;
+	__u32 v0;
+	__u32 udiff;
+	__u32 vdiff;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_xnr3_blending_params - Blending factor
+ *
+ * @strength: The factor for blending output with input. This is a tuning
+ *	parameter. Higher values lead to more aggressive XNR operation.
+ */
+struct ipu3_uapi_xnr3_blending_params {
+	__u32 strength;
+} __attribute__((packed));
+
+/**
+ * struct ipu3_uapi_isp_xnr3_params - Extreme noise reduction v3 parameters
+ *
+ * @alpha: parameters for xnr3 alpha. See &ipu3_uapi_xnr3_alpha_params
+ * @coring: parameters for xnr3 coring. See &ipu3_uapi_xnr3_coring_params
+ * @blending: parameters for xnr3 blending. See &ipu3_uapi_xnr3_blending_params
+ */
+struct ipu3_uapi_isp_xnr3_params {
+	struct ipu3_uapi_xnr3_alpha_params alpha;
+	struct ipu3_uapi_xnr3_coring_params coring;
+	struct ipu3_uapi_xnr3_blending_params blending;
+} __attribute__((packed));
+
+/***** Obgrid (optical black level compensation) table entry *****/
+
+/**
+ * struct ipu3_uapi_obgrid_param - Optical black level compensation parameters
+ *
+ * @gr: Grid table values for color GR
+ * @r: Grid table values for color R
+ * @b: Grid table values for color B
+ * @gb: Grid table values for color GB
+ *
+ * Black level is different for red, green, and blue channels. So black level
+ * compensation is different per channel.
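+ *
+ * As an illustration (hypothetical values, not from any datasheet): a sensor
+ * with a black pedestal of 64 counts on all four channels would set
+ * gr = r = b = gb = 64 in every grid entry.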
+ */ +struct ipu3_uapi_obgrid_param { + __u16 gr; + __u16 r; + __u16 b; + __u16 gb; +} __attribute__((packed)); + +/******************* V4L2_META_FMT_IPU3_PARAMS *******************/ + +/** + * struct ipu3_uapi_flags - bits to indicate which pipeline needs update + * + * @gdc: 0 = no update, 1 = update. + * @obgrid: 0 = no update, 1 = update. + * @reserved1: Not used. + * @acc_bnr: 0 = no update, 1 = update. + * @acc_green_disparity: 0 = no update, 1 = update. + * @acc_dm: 0 = no update, 1 = update. + * @acc_ccm: 0 = no update, 1 = update. + * @acc_gamma: 0 = no update, 1 = update. + * @acc_csc: 0 = no update, 1 = update. + * @acc_cds: 0 = no update, 1 = update. + * @acc_shd: 0 = no update, 1 = update. + * @reserved2: Not used. + * @acc_iefd: 0 = no update, 1 = update. + * @acc_yds_c0: 0 = no update, 1 = update. + * @acc_chnr_c0: 0 = no update, 1 = update. + * @acc_y_ee_nr: 0 = no update, 1 = update. + * @acc_yds: 0 = no update, 1 = update. + * @acc_chnr: 0 = no update, 1 = update. + * @acc_ytm: 0 = no update, 1 = update. + * @acc_yds2: 0 = no update, 1 = update. + * @acc_tcc: 0 = no update, 1 = update. + * @acc_dpc: 0 = no update, 1 = update. + * @acc_bds: 0 = no update, 1 = update. + * @acc_anr: 0 = no update, 1 = update. + * @acc_awb_fr: 0 = no update, 1 = update. + * @acc_ae: 0 = no update, 1 = update. + * @acc_af: 0 = no update, 1 = update. + * @acc_awb: 0 = no update, 1 = update. + * @reserved3: Not used. + * @lin_vmem_params: 0 = no update, 1 = update. + * @tnr3_vmem_params: 0 = no update, 1 = update. + * @xnr3_vmem_params: 0 = no update, 1 = update. + * @tnr3_dmem_params: 0 = no update, 1 = update. + * @xnr3_dmem_params: 0 = no update, 1 = update. + * @reserved4: Not used. + * @obgrid_param: 0 = no update, 1 = update. + * @reserved5: Not used. + */ +struct ipu3_uapi_flags { + __u32 gdc:1; + __u32 obgrid:1; + __u32 reserved1:30; + + __u32 acc_bnr:1; + __u32 acc_green_disparity:1; + __u32 acc_dm:1; + __u32 acc_ccm:1; + __u32 acc_gamma:1; + __u32 acc_csc:1; + __u32 acc_cds:1; + __u32 acc_shd:1; + __u32 reserved2:2; + __u32 acc_iefd:1; + __u32 acc_yds_c0:1; + __u32 acc_chnr_c0:1; + __u32 acc_y_ee_nr:1; + __u32 acc_yds:1; + __u32 acc_chnr:1; + __u32 acc_ytm:1; + __u32 acc_yds2:1; + __u32 acc_tcc:1; + __u32 acc_dpc:1; + __u32 acc_bds:1; + __u32 acc_anr:1; + __u32 acc_awb_fr:1; + __u32 acc_ae:1; + __u32 acc_af:1; + __u32 acc_awb:1; + __u32 reserved3:4; + + __u32 lin_vmem_params:1; + __u32 tnr3_vmem_params:1; + __u32 xnr3_vmem_params:1; + __u32 tnr3_dmem_params:1; + __u32 xnr3_dmem_params:1; + __u32 reserved4:1; + __u32 obgrid_param:1; + __u32 reserved5:25; +} __attribute__((packed)); + +/** + * struct ipu3_uapi_params - V4L2_META_FMT_IPU3_PARAMS + * + * @use: select which parameters to apply, see &ipu3_uapi_flags + * @acc_param: ACC parameters, as specified by &ipu3_uapi_acc_param + * @lin_vmem_params: linearization VMEM, as specified by + * &ipu3_uapi_isp_lin_vmem_params + * @tnr3_vmem_params: tnr3 VMEM as specified by + * &ipu3_uapi_isp_tnr3_vmem_params + * @xnr3_vmem_params: xnr3 VMEM as specified by + * &ipu3_uapi_isp_xnr3_vmem_params + * @tnr3_dmem_params: tnr3 DMEM as specified by &ipu3_uapi_isp_tnr3_params + * @xnr3_dmem_params: xnr3 DMEM as specified by &ipu3_uapi_isp_xnr3_params + * @obgrid_param: obgrid parameters as specified by + * &ipu3_uapi_obgrid_param + * + * The video queue "parameters" is of format V4L2_META_FMT_IPU3_PARAMS. + * This is a "single plane" v4l2_meta_format using V4L2_BUF_TYPE_META_OUTPUT. 
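+ *
+ * A minimal update sketch (illustrative only; "buffer" and "new_awb_config"
+ * are assumed to exist in the caller) that applies nothing but a fresh AWB
+ * configuration:
+ *
+ *	struct ipu3_uapi_params *params = buffer;
+ *
+ *	memset(params, 0, sizeof(*params));
+ *	params->use.acc_awb = 1;
+ *	params->acc_param.awb = new_awb_config;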
+ * + * struct ipu3_uapi_params as defined below contains a lot of parameters and + * ipu3_uapi_flags selects which parameters to apply. + */ +struct ipu3_uapi_params { + /* Flags which of the settings below are to be applied */ + struct ipu3_uapi_flags use __attribute__((aligned(32))); + + /* Accelerator cluster parameters */ + struct ipu3_uapi_acc_param acc_param; + + /* ISP vector address space parameters */ + struct ipu3_uapi_isp_lin_vmem_params lin_vmem_params; + struct ipu3_uapi_isp_tnr3_vmem_params tnr3_vmem_params; + struct ipu3_uapi_isp_xnr3_vmem_params xnr3_vmem_params; + + /* ISP data memory (DMEM) parameters */ + struct ipu3_uapi_isp_tnr3_params tnr3_dmem_params; + struct ipu3_uapi_isp_xnr3_params xnr3_dmem_params; + + /* Optical black level compensation */ + struct ipu3_uapi_obgrid_param obgrid_param; +} __attribute__((packed)); + +#endif /* __IPU3_UAPI_H */ diff --git a/spider-cam/libcamera/include/linux/media-bus-format.h b/spider-cam/libcamera/include/linux/media-bus-format.h new file mode 100644 index 0000000..d4c1d99 --- /dev/null +++ b/spider-cam/libcamera/include/linux/media-bus-format.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Media Bus API header + * + * Copyright (C) 2009, Guennadi Liakhovetski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __LINUX_MEDIA_BUS_FORMAT_H +#define __LINUX_MEDIA_BUS_FORMAT_H + +/* + * These bus formats uniquely identify data formats on the data bus. Format 0 + * is reserved, MEDIA_BUS_FMT_FIXED shall be used by host-client pairs, where + * the data format is fixed. Additionally, "2X8" means that one pixel is + * transferred in two 8-bit samples, "BE" or "LE" specify in which order those + * samples are transferred over the bus: "LE" means that the least significant + * bits are transferred first, "BE" means that the most significant bits are + * transferred first, and "PADHI" and "PADLO" define which bits - low or high, + * in the incomplete high byte, are filled with padding bits. + * + * The bus formats are grouped by type, bus_width, bits per component, samples + * per pixel and order of subsamples. Numerical values are sorted using generic + * numerical sort order (8 thus comes before 10). + * + * As their value can't change when a new bus format is inserted in the + * enumeration, the bus formats are explicitly given a numerical value. The next + * free values for each category are listed below, update them when inserting + * new pixel codes. 
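+ *
+ * As a worked example of the naming scheme above,
+ * MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE (defined below) describes a 10-bit
+ * Bayer BGGR sample transferred as two 8-bit samples, least significant
+ * bits first, with the unused upper bits of the incomplete high byte
+ * filled with padding.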
+ */ + +#define MEDIA_BUS_FMT_FIXED 0x0001 + +/* RGB - next is 0x1026 */ +#define MEDIA_BUS_FMT_RGB444_1X12 0x1016 +#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE 0x1001 +#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE 0x1002 +#define MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE 0x1003 +#define MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE 0x1004 +#define MEDIA_BUS_FMT_RGB565_1X16 0x1017 +#define MEDIA_BUS_FMT_BGR565_2X8_BE 0x1005 +#define MEDIA_BUS_FMT_BGR565_2X8_LE 0x1006 +#define MEDIA_BUS_FMT_RGB565_2X8_BE 0x1007 +#define MEDIA_BUS_FMT_RGB565_2X8_LE 0x1008 +#define MEDIA_BUS_FMT_RGB666_1X18 0x1009 +#define MEDIA_BUS_FMT_RGB666_2X9_BE 0x1025 +#define MEDIA_BUS_FMT_BGR666_1X18 0x1023 +#define MEDIA_BUS_FMT_RBG888_1X24 0x100e +#define MEDIA_BUS_FMT_RGB666_1X24_CPADHI 0x1015 +#define MEDIA_BUS_FMT_BGR666_1X24_CPADHI 0x1024 +#define MEDIA_BUS_FMT_RGB565_1X24_CPADHI 0x1022 +#define MEDIA_BUS_FMT_RGB666_1X7X3_SPWG 0x1010 +#define MEDIA_BUS_FMT_BGR888_1X24 0x1013 +#define MEDIA_BUS_FMT_BGR888_3X8 0x101b +#define MEDIA_BUS_FMT_GBR888_1X24 0x1014 +#define MEDIA_BUS_FMT_RGB888_1X24 0x100a +#define MEDIA_BUS_FMT_RGB888_2X12_BE 0x100b +#define MEDIA_BUS_FMT_RGB888_2X12_LE 0x100c +#define MEDIA_BUS_FMT_RGB888_3X8 0x101c +#define MEDIA_BUS_FMT_RGB888_3X8_DELTA 0x101d +#define MEDIA_BUS_FMT_RGB888_1X7X4_SPWG 0x1011 +#define MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA 0x1012 +#define MEDIA_BUS_FMT_RGB666_1X30_CPADLO 0x101e +#define MEDIA_BUS_FMT_RGB888_1X30_CPADLO 0x101f +#define MEDIA_BUS_FMT_ARGB8888_1X32 0x100d +#define MEDIA_BUS_FMT_RGB888_1X32_PADHI 0x100f +#define MEDIA_BUS_FMT_RGB101010_1X30 0x1018 +#define MEDIA_BUS_FMT_RGB666_1X36_CPADLO 0x1020 +#define MEDIA_BUS_FMT_RGB888_1X36_CPADLO 0x1021 +#define MEDIA_BUS_FMT_RGB121212_1X36 0x1019 +#define MEDIA_BUS_FMT_RGB161616_1X48 0x101a + +/* YUV (including grey) - next is 0x202f */ +#define MEDIA_BUS_FMT_Y8_1X8 0x2001 +#define MEDIA_BUS_FMT_UV8_1X8 0x2015 +#define MEDIA_BUS_FMT_UYVY8_1_5X8 0x2002 +#define MEDIA_BUS_FMT_VYUY8_1_5X8 0x2003 +#define MEDIA_BUS_FMT_YUYV8_1_5X8 0x2004 +#define MEDIA_BUS_FMT_YVYU8_1_5X8 0x2005 +#define MEDIA_BUS_FMT_UYVY8_2X8 0x2006 +#define MEDIA_BUS_FMT_VYUY8_2X8 0x2007 +#define MEDIA_BUS_FMT_YUYV8_2X8 0x2008 +#define MEDIA_BUS_FMT_YVYU8_2X8 0x2009 +#define MEDIA_BUS_FMT_Y10_1X10 0x200a +#define MEDIA_BUS_FMT_Y10_2X8_PADHI_LE 0x202c +#define MEDIA_BUS_FMT_UYVY10_2X10 0x2018 +#define MEDIA_BUS_FMT_VYUY10_2X10 0x2019 +#define MEDIA_BUS_FMT_YUYV10_2X10 0x200b +#define MEDIA_BUS_FMT_YVYU10_2X10 0x200c +#define MEDIA_BUS_FMT_Y12_1X12 0x2013 +#define MEDIA_BUS_FMT_UYVY12_2X12 0x201c +#define MEDIA_BUS_FMT_VYUY12_2X12 0x201d +#define MEDIA_BUS_FMT_YUYV12_2X12 0x201e +#define MEDIA_BUS_FMT_YVYU12_2X12 0x201f +#define MEDIA_BUS_FMT_Y14_1X14 0x202d +#define MEDIA_BUS_FMT_Y16_1X16 0x202e +#define MEDIA_BUS_FMT_UYVY8_1X16 0x200f +#define MEDIA_BUS_FMT_VYUY8_1X16 0x2010 +#define MEDIA_BUS_FMT_YUYV8_1X16 0x2011 +#define MEDIA_BUS_FMT_YVYU8_1X16 0x2012 +#define MEDIA_BUS_FMT_YDYUYDYV8_1X16 0x2014 +#define MEDIA_BUS_FMT_UYVY10_1X20 0x201a +#define MEDIA_BUS_FMT_VYUY10_1X20 0x201b +#define MEDIA_BUS_FMT_YUYV10_1X20 0x200d +#define MEDIA_BUS_FMT_YVYU10_1X20 0x200e +#define MEDIA_BUS_FMT_VUY8_1X24 0x2024 +#define MEDIA_BUS_FMT_YUV8_1X24 0x2025 +#define MEDIA_BUS_FMT_UYYVYY8_0_5X24 0x2026 +#define MEDIA_BUS_FMT_UYVY12_1X24 0x2020 +#define MEDIA_BUS_FMT_VYUY12_1X24 0x2021 +#define MEDIA_BUS_FMT_YUYV12_1X24 0x2022 +#define MEDIA_BUS_FMT_YVYU12_1X24 0x2023 +#define MEDIA_BUS_FMT_YUV10_1X30 0x2016 +#define MEDIA_BUS_FMT_UYYVYY10_0_5X30 0x2027 +#define MEDIA_BUS_FMT_AYUV8_1X32 0x2017 
+#define MEDIA_BUS_FMT_UYYVYY12_0_5X36 0x2028 +#define MEDIA_BUS_FMT_YUV12_1X36 0x2029 +#define MEDIA_BUS_FMT_YUV16_1X48 0x202a +#define MEDIA_BUS_FMT_UYYVYY16_0_5X48 0x202b + +/* Bayer - next is 0x3021 */ +#define MEDIA_BUS_FMT_SBGGR8_1X8 0x3001 +#define MEDIA_BUS_FMT_SGBRG8_1X8 0x3013 +#define MEDIA_BUS_FMT_SGRBG8_1X8 0x3002 +#define MEDIA_BUS_FMT_SRGGB8_1X8 0x3014 +#define MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8 0x3015 +#define MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8 0x3016 +#define MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8 0x3017 +#define MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8 0x3018 +#define MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8 0x300b +#define MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8 0x300c +#define MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8 0x3009 +#define MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8 0x300d +#define MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE 0x3003 +#define MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE 0x3004 +#define MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE 0x3005 +#define MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE 0x3006 +#define MEDIA_BUS_FMT_SBGGR10_1X10 0x3007 +#define MEDIA_BUS_FMT_SGBRG10_1X10 0x300e +#define MEDIA_BUS_FMT_SGRBG10_1X10 0x300a +#define MEDIA_BUS_FMT_SRGGB10_1X10 0x300f +#define MEDIA_BUS_FMT_SBGGR12_1X12 0x3008 +#define MEDIA_BUS_FMT_SGBRG12_1X12 0x3010 +#define MEDIA_BUS_FMT_SGRBG12_1X12 0x3011 +#define MEDIA_BUS_FMT_SRGGB12_1X12 0x3012 +#define MEDIA_BUS_FMT_SBGGR14_1X14 0x3019 +#define MEDIA_BUS_FMT_SGBRG14_1X14 0x301a +#define MEDIA_BUS_FMT_SGRBG14_1X14 0x301b +#define MEDIA_BUS_FMT_SRGGB14_1X14 0x301c +#define MEDIA_BUS_FMT_SBGGR16_1X16 0x301d +#define MEDIA_BUS_FMT_SGBRG16_1X16 0x301e +#define MEDIA_BUS_FMT_SGRBG16_1X16 0x301f +#define MEDIA_BUS_FMT_SRGGB16_1X16 0x3020 + +/* JPEG compressed formats - next is 0x4002 */ +#define MEDIA_BUS_FMT_JPEG_1X8 0x4001 + +/* Vendor specific formats - next is 0x5002 */ + +/* S5C73M3 sensor specific interleaved UYVY and JPEG */ +#define MEDIA_BUS_FMT_S5C_UYVY_JPEG_1X8 0x5001 + +/* HSV - next is 0x6002 */ +#define MEDIA_BUS_FMT_AHSV8888_1X32 0x6001 + +/* + * This format should be used when the same driver handles + * both sides of the link and the bus format is a fixed + * metadata format that is not configurable from userspace. + * Width and height will be set to 0 for this format. + */ +#define MEDIA_BUS_FMT_METADATA_FIXED 0x7001 + +/* Generic line based metadata formats for serial buses. Next is 0x8008. */ +#define MEDIA_BUS_FMT_META_8 0x8001 +#define MEDIA_BUS_FMT_META_10 0x8002 +#define MEDIA_BUS_FMT_META_12 0x8003 +#define MEDIA_BUS_FMT_META_14 0x8004 +#define MEDIA_BUS_FMT_META_16 0x8005 +#define MEDIA_BUS_FMT_META_20 0x8006 +#define MEDIA_BUS_FMT_META_24 0x8007 + +#endif /* __LINUX_MEDIA_BUS_FORMAT_H */ diff --git a/spider-cam/libcamera/include/linux/media.h b/spider-cam/libcamera/include/linux/media.h new file mode 100644 index 0000000..b5a77bb --- /dev/null +++ b/spider-cam/libcamera/include/linux/media.h @@ -0,0 +1,420 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Multimedia device API + * + * Copyright (C) 2010 Nokia Corporation + * + * Contacts: Laurent Pinchart + * Sakari Ailus + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */
+
+#ifndef __LINUX_MEDIA_H
+#define __LINUX_MEDIA_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+struct media_device_info {
+	char driver[16];
+	char model[32];
+	char serial[40];
+	char bus_info[32];
+	__u32 media_version;
+	__u32 hw_revision;
+	__u32 driver_version;
+	__u32 reserved[31];
+};
+
+/*
+ * Base number ranges for entity functions
+ *
+ * NOTE: Userspace should not rely on these ranges to identify a group
+ * of function types, as newer functions can be added with any name within
+ * the full u32 range.
+ *
+ * Some older functions use the MEDIA_ENT_F_OLD_*_BASE range. Do not
+ * change this, this is for backwards compatibility. When adding new
+ * functions always use MEDIA_ENT_F_BASE.
+ */
+#define MEDIA_ENT_F_BASE		0x00000000
+#define MEDIA_ENT_F_OLD_BASE		0x00010000
+#define MEDIA_ENT_F_OLD_SUBDEV_BASE	0x00020000
+
+/*
+ * Initial value to be used when a new entity is created
+ * Drivers should change it to something useful.
+ */
+#define MEDIA_ENT_F_UNKNOWN		MEDIA_ENT_F_BASE
+
+/*
+ * Subdevs are initialized with MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN in order
+ * to preserve backward compatibility. Drivers must change to the proper
+ * subdev type before registering the entity.
+ */
+#define MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN	MEDIA_ENT_F_OLD_SUBDEV_BASE
+
+/*
+ * DVB entity functions
+ */
+#define MEDIA_ENT_F_DTV_DEMOD		(MEDIA_ENT_F_BASE + 0x00001)
+#define MEDIA_ENT_F_TS_DEMUX		(MEDIA_ENT_F_BASE + 0x00002)
+#define MEDIA_ENT_F_DTV_CA		(MEDIA_ENT_F_BASE + 0x00003)
+#define MEDIA_ENT_F_DTV_NET_DECAP	(MEDIA_ENT_F_BASE + 0x00004)
+
+/*
+ * I/O entity functions
+ */
+#define MEDIA_ENT_F_IO_V4L		(MEDIA_ENT_F_OLD_BASE + 1)
+#define MEDIA_ENT_F_IO_DTV		(MEDIA_ENT_F_BASE + 0x01001)
+#define MEDIA_ENT_F_IO_VBI		(MEDIA_ENT_F_BASE + 0x01002)
+#define MEDIA_ENT_F_IO_SWRADIO		(MEDIA_ENT_F_BASE + 0x01003)
+
+/*
+ * Sensor functions
+ */
+#define MEDIA_ENT_F_CAM_SENSOR		(MEDIA_ENT_F_OLD_SUBDEV_BASE + 1)
+#define MEDIA_ENT_F_FLASH		(MEDIA_ENT_F_OLD_SUBDEV_BASE + 2)
+#define MEDIA_ENT_F_LENS		(MEDIA_ENT_F_OLD_SUBDEV_BASE + 3)
+
+/*
+ * Digital TV, analog TV, radio and/or software defined radio tuner functions.
+ *
+ * It is a responsibility of the master/bridge drivers to add connectors
+ * and links for MEDIA_ENT_F_TUNER. Please notice that some old tuners
+ * may require the usage of separate I2C chips to decode analog TV signals,
+ * when the master/bridge chipset doesn't have its own TV standard decoder.
+ * In such cases, the IF-PLL staging is mapped via one or two entities:
+ * MEDIA_ENT_F_IF_VID_DECODER and/or MEDIA_ENT_F_IF_AUD_DECODER.
+ */
+#define MEDIA_ENT_F_TUNER		(MEDIA_ENT_F_OLD_SUBDEV_BASE + 5)
+
+/*
+ * Analog TV IF-PLL decoder functions
+ *
+ * It is a responsibility of the master/bridge drivers to create links
+ * for MEDIA_ENT_F_IF_VID_DECODER and MEDIA_ENT_F_IF_AUD_DECODER.
+ */ +#define MEDIA_ENT_F_IF_VID_DECODER (MEDIA_ENT_F_BASE + 0x02001) +#define MEDIA_ENT_F_IF_AUD_DECODER (MEDIA_ENT_F_BASE + 0x02002) + +/* + * Audio entity functions + */ +#define MEDIA_ENT_F_AUDIO_CAPTURE (MEDIA_ENT_F_BASE + 0x03001) +#define MEDIA_ENT_F_AUDIO_PLAYBACK (MEDIA_ENT_F_BASE + 0x03002) +#define MEDIA_ENT_F_AUDIO_MIXER (MEDIA_ENT_F_BASE + 0x03003) + +/* + * Processing entity functions + */ +#define MEDIA_ENT_F_PROC_VIDEO_COMPOSER (MEDIA_ENT_F_BASE + 0x4001) +#define MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER (MEDIA_ENT_F_BASE + 0x4002) +#define MEDIA_ENT_F_PROC_VIDEO_PIXEL_ENC_CONV (MEDIA_ENT_F_BASE + 0x4003) +#define MEDIA_ENT_F_PROC_VIDEO_LUT (MEDIA_ENT_F_BASE + 0x4004) +#define MEDIA_ENT_F_PROC_VIDEO_SCALER (MEDIA_ENT_F_BASE + 0x4005) +#define MEDIA_ENT_F_PROC_VIDEO_STATISTICS (MEDIA_ENT_F_BASE + 0x4006) +#define MEDIA_ENT_F_PROC_VIDEO_ENCODER (MEDIA_ENT_F_BASE + 0x4007) +#define MEDIA_ENT_F_PROC_VIDEO_DECODER (MEDIA_ENT_F_BASE + 0x4008) +#define MEDIA_ENT_F_PROC_VIDEO_ISP (MEDIA_ENT_F_BASE + 0x4009) + +/* + * Switch and bridge entity functions + */ +#define MEDIA_ENT_F_VID_MUX (MEDIA_ENT_F_BASE + 0x5001) +#define MEDIA_ENT_F_VID_IF_BRIDGE (MEDIA_ENT_F_BASE + 0x5002) + +/* + * Video decoder/encoder functions + */ +#define MEDIA_ENT_F_ATV_DECODER (MEDIA_ENT_F_OLD_SUBDEV_BASE + 4) +#define MEDIA_ENT_F_DV_DECODER (MEDIA_ENT_F_BASE + 0x6001) +#define MEDIA_ENT_F_DV_ENCODER (MEDIA_ENT_F_BASE + 0x6002) + +/* Entity flags */ +#define MEDIA_ENT_FL_DEFAULT (1U << 0) +#define MEDIA_ENT_FL_CONNECTOR (1U << 1) + +/* OR with the entity id value to find the next entity */ +#define MEDIA_ENT_ID_FLAG_NEXT (1U << 31) + +struct media_entity_desc { + __u32 id; + char name[32]; + __u32 type; + __u32 revision; + __u32 flags; + __u32 group_id; + __u16 pads; + __u16 links; + + __u32 reserved[4]; + + union { + /* Node specifications */ + struct { + __u32 major; + __u32 minor; + } dev; + + /* + * TODO: this shouldn't have been added without + * actual drivers that use this. When the first real driver + * appears that sets this information, special attention + * should be given whether this information is 1) enough, and + * 2) can deal with udev rules that rename devices. The struct + * dev would not be sufficient for this since that does not + * contain the subdevice information. In addition, struct dev + * can only refer to a single device, and not to multiple (e.g. + * pcm and mixer devices). + */ + struct { + __u32 card; + __u32 device; + __u32 subdevice; + } alsa; + + /* + * DEPRECATED: previous node specifications. Kept just to + * avoid breaking compilation. Use media_entity_desc.dev + * instead. 
+ */ + struct { + __u32 major; + __u32 minor; + } v4l; + struct { + __u32 major; + __u32 minor; + } fb; + int dvb; + + /* Sub-device specifications */ + /* Nothing needed yet */ + __u8 raw[184]; + }; +}; + +#define MEDIA_PAD_FL_SINK (1U << 0) +#define MEDIA_PAD_FL_SOURCE (1U << 1) +#define MEDIA_PAD_FL_MUST_CONNECT (1U << 2) + +struct media_pad_desc { + __u32 entity; /* entity ID */ + __u16 index; /* pad index */ + __u32 flags; /* pad flags */ + __u32 reserved[2]; +}; + +#define MEDIA_LNK_FL_ENABLED (1U << 0) +#define MEDIA_LNK_FL_IMMUTABLE (1U << 1) +#define MEDIA_LNK_FL_DYNAMIC (1U << 2) + +#define MEDIA_LNK_FL_LINK_TYPE (0xf << 28) +# define MEDIA_LNK_FL_DATA_LINK (0U << 28) +# define MEDIA_LNK_FL_INTERFACE_LINK (1U << 28) +# define MEDIA_LNK_FL_ANCILLARY_LINK (2U << 28) + +struct media_link_desc { + struct media_pad_desc source; + struct media_pad_desc sink; + __u32 flags; + __u32 reserved[2]; +}; + +struct media_links_enum { + __u32 entity; + /* Should have enough room for pads elements */ + struct media_pad_desc *pads; + /* Should have enough room for links elements */ + struct media_link_desc *links; + __u32 reserved[4]; +}; + +/* Interface type ranges */ + +#define MEDIA_INTF_T_DVB_BASE 0x00000100 +#define MEDIA_INTF_T_V4L_BASE 0x00000200 + +/* Interface types */ + +#define MEDIA_INTF_T_DVB_FE (MEDIA_INTF_T_DVB_BASE) +#define MEDIA_INTF_T_DVB_DEMUX (MEDIA_INTF_T_DVB_BASE + 1) +#define MEDIA_INTF_T_DVB_DVR (MEDIA_INTF_T_DVB_BASE + 2) +#define MEDIA_INTF_T_DVB_CA (MEDIA_INTF_T_DVB_BASE + 3) +#define MEDIA_INTF_T_DVB_NET (MEDIA_INTF_T_DVB_BASE + 4) + +#define MEDIA_INTF_T_V4L_VIDEO (MEDIA_INTF_T_V4L_BASE) +#define MEDIA_INTF_T_V4L_VBI (MEDIA_INTF_T_V4L_BASE + 1) +#define MEDIA_INTF_T_V4L_RADIO (MEDIA_INTF_T_V4L_BASE + 2) +#define MEDIA_INTF_T_V4L_SUBDEV (MEDIA_INTF_T_V4L_BASE + 3) +#define MEDIA_INTF_T_V4L_SWRADIO (MEDIA_INTF_T_V4L_BASE + 4) +#define MEDIA_INTF_T_V4L_TOUCH (MEDIA_INTF_T_V4L_BASE + 5) + +#define MEDIA_INTF_T_ALSA_BASE 0x00000300 +#define MEDIA_INTF_T_ALSA_PCM_CAPTURE (MEDIA_INTF_T_ALSA_BASE) +#define MEDIA_INTF_T_ALSA_PCM_PLAYBACK (MEDIA_INTF_T_ALSA_BASE + 1) +#define MEDIA_INTF_T_ALSA_CONTROL (MEDIA_INTF_T_ALSA_BASE + 2) + + +/* + * MC next gen API definitions + */ + +/* + * Appeared in 4.19.0. + * + * The media_version argument comes from the media_version field in + * struct media_device_info. + */ +#define MEDIA_V2_ENTITY_HAS_FLAGS(media_version) \ + ((media_version) >= ((4U << 16) | (19U << 8) | 0U)) + +struct media_v2_entity { + __u32 id; + char name[64]; + __u32 function; /* Main function of the entity */ + __u32 flags; + __u32 reserved[5]; +} __attribute__ ((packed)); + +/* Should match the specific fields at media_intf_devnode */ +struct media_v2_intf_devnode { + __u32 major; + __u32 minor; +} __attribute__ ((packed)); + +struct media_v2_interface { + __u32 id; + __u32 intf_type; + __u32 flags; + __u32 reserved[9]; + + union { + struct media_v2_intf_devnode devnode; + __u32 raw[16]; + }; +} __attribute__ ((packed)); + +/* + * Appeared in 4.19.0. + * + * The media_version argument comes from the media_version field in + * struct media_device_info. 
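+ *
+ * For example, a kernel reporting media_version 4.19.0 encodes it as
+ * (4 << 16) | (19 << 8) | 0 = 0x00041300, so the check below passes. A
+ * minimal sketch (assuming "fd" is an open media device node):
+ *
+ *	struct media_device_info info;
+ *	int has_pad_index = 0;
+ *
+ *	if (ioctl(fd, MEDIA_IOC_DEVICE_INFO, &info) == 0)
+ *		has_pad_index = MEDIA_V2_PAD_HAS_INDEX(info.media_version);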
+ */ +#define MEDIA_V2_PAD_HAS_INDEX(media_version) \ + ((media_version) >= ((4U << 16) | (19U << 8) | 0U)) + +struct media_v2_pad { + __u32 id; + __u32 entity_id; + __u32 flags; + __u32 index; + __u32 reserved[4]; +} __attribute__ ((packed)); + +struct media_v2_link { + __u32 id; + __u32 source_id; + __u32 sink_id; + __u32 flags; + __u32 reserved[6]; +} __attribute__ ((packed)); + +struct media_v2_topology { + __u64 topology_version; + + __u32 num_entities; + __u32 reserved1; + __u64 ptr_entities; + + __u32 num_interfaces; + __u32 reserved2; + __u64 ptr_interfaces; + + __u32 num_pads; + __u32 reserved3; + __u64 ptr_pads; + + __u32 num_links; + __u32 reserved4; + __u64 ptr_links; +} __attribute__ ((packed)); + +/* ioctls */ + +#define MEDIA_IOC_DEVICE_INFO _IOWR('|', 0x00, struct media_device_info) +#define MEDIA_IOC_ENUM_ENTITIES _IOWR('|', 0x01, struct media_entity_desc) +#define MEDIA_IOC_ENUM_LINKS _IOWR('|', 0x02, struct media_links_enum) +#define MEDIA_IOC_SETUP_LINK _IOWR('|', 0x03, struct media_link_desc) +#define MEDIA_IOC_G_TOPOLOGY _IOWR('|', 0x04, struct media_v2_topology) +#define MEDIA_IOC_REQUEST_ALLOC _IOR ('|', 0x05, int) + +/* + * These ioctls are called on the request file descriptor as returned + * by MEDIA_IOC_REQUEST_ALLOC. + */ +#define MEDIA_REQUEST_IOC_QUEUE _IO('|', 0x80) +#define MEDIA_REQUEST_IOC_REINIT _IO('|', 0x81) + + +/* + * Legacy symbols used to avoid userspace compilation breakages. + * Do not use any of this in new applications! + * + * Those symbols map the entity function into types and should be + * used only on legacy programs for legacy hardware. Don't rely + * on those for MEDIA_IOC_G_TOPOLOGY. + */ +#define MEDIA_ENT_TYPE_SHIFT 16 +#define MEDIA_ENT_TYPE_MASK 0x00ff0000 +#define MEDIA_ENT_SUBTYPE_MASK 0x0000ffff + +#define MEDIA_ENT_T_DEVNODE_UNKNOWN (MEDIA_ENT_F_OLD_BASE | \ + MEDIA_ENT_SUBTYPE_MASK) + +#define MEDIA_ENT_T_DEVNODE MEDIA_ENT_F_OLD_BASE +#define MEDIA_ENT_T_DEVNODE_V4L MEDIA_ENT_F_IO_V4L +#define MEDIA_ENT_T_DEVNODE_FB (MEDIA_ENT_F_OLD_BASE + 2) +#define MEDIA_ENT_T_DEVNODE_ALSA (MEDIA_ENT_F_OLD_BASE + 3) +#define MEDIA_ENT_T_DEVNODE_DVB (MEDIA_ENT_F_OLD_BASE + 4) + +#define MEDIA_ENT_T_UNKNOWN MEDIA_ENT_F_UNKNOWN +#define MEDIA_ENT_T_V4L2_VIDEO MEDIA_ENT_F_IO_V4L +#define MEDIA_ENT_T_V4L2_SUBDEV MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN +#define MEDIA_ENT_T_V4L2_SUBDEV_SENSOR MEDIA_ENT_F_CAM_SENSOR +#define MEDIA_ENT_T_V4L2_SUBDEV_FLASH MEDIA_ENT_F_FLASH +#define MEDIA_ENT_T_V4L2_SUBDEV_LENS MEDIA_ENT_F_LENS +#define MEDIA_ENT_T_V4L2_SUBDEV_DECODER MEDIA_ENT_F_ATV_DECODER +#define MEDIA_ENT_T_V4L2_SUBDEV_TUNER MEDIA_ENT_F_TUNER + +#define MEDIA_ENT_F_DTV_DECODER MEDIA_ENT_F_DV_DECODER + +/* + * There is still no full ALSA support in the media controller. These + * defines should not have been added and we leave them here only + * in case some application tries to use these defines. + * + * The ALSA defines that are in use have been moved into __KERNEL__ + * scope. As support gets added to these interface types, they should + * be moved into __KERNEL__ scope with the code that uses them. 
+ */
+#define MEDIA_INTF_T_ALSA_COMPRESS	(MEDIA_INTF_T_ALSA_BASE + 3)
+#define MEDIA_INTF_T_ALSA_RAWMIDI	(MEDIA_INTF_T_ALSA_BASE + 4)
+#define MEDIA_INTF_T_ALSA_HWDEP		(MEDIA_INTF_T_ALSA_BASE + 5)
+#define MEDIA_INTF_T_ALSA_SEQUENCER	(MEDIA_INTF_T_ALSA_BASE + 6)
+#define MEDIA_INTF_T_ALSA_TIMER		(MEDIA_INTF_T_ALSA_BASE + 7)
+
+/* Obsolete symbol for media_version, no longer used in the kernel */
+#define MEDIA_API_VERSION		((0U << 16) | (1U << 8) | 0U)
+
+
+#endif /* __LINUX_MEDIA_H */
diff --git a/spider-cam/libcamera/include/linux/rkisp1-config.h b/spider-cam/libcamera/include/linux/rkisp1-config.h
new file mode 100644
index 0000000..f87c6bd
--- /dev/null
+++ b/spider-cam/libcamera/include/linux/rkisp1-config.h
@@ -0,0 +1,999 @@
+/* SPDX-License-Identifier: ((GPL-2.0+ WITH Linux-syscall-note) OR MIT) */
+/*
+ * Rockchip ISP1 userspace API
+ * Copyright (C) 2017 Rockchip Electronics Co., Ltd.
+ */
+
+#ifndef _RKISP1_CONFIG_H
+#define _RKISP1_CONFIG_H
+
+#include <linux/types.h>
+
+/* Defect Pixel Cluster Detection */
+#define RKISP1_CIF_ISP_MODULE_DPCC		(1U << 0)
+/* Black Level Subtraction */
+#define RKISP1_CIF_ISP_MODULE_BLS		(1U << 1)
+/* Sensor De-gamma */
+#define RKISP1_CIF_ISP_MODULE_SDG		(1U << 2)
+/* Histogram statistics configuration */
+#define RKISP1_CIF_ISP_MODULE_HST		(1U << 3)
+/* Lens Shade Control */
+#define RKISP1_CIF_ISP_MODULE_LSC		(1U << 4)
+/* Auto White Balance Gain */
+#define RKISP1_CIF_ISP_MODULE_AWB_GAIN		(1U << 5)
+/* Filter */
+#define RKISP1_CIF_ISP_MODULE_FLT		(1U << 6)
+/* Bayer Demosaic */
+#define RKISP1_CIF_ISP_MODULE_BDM		(1U << 7)
+/* Cross Talk */
+#define RKISP1_CIF_ISP_MODULE_CTK		(1U << 8)
+/* Gamma Out Curve */
+#define RKISP1_CIF_ISP_MODULE_GOC		(1U << 9)
+/* Color Processing */
+#define RKISP1_CIF_ISP_MODULE_CPROC		(1U << 10)
+/* Auto Focus Control statistics configuration */
+#define RKISP1_CIF_ISP_MODULE_AFC		(1U << 11)
+/* Auto White Balancing statistics configuration */
+#define RKISP1_CIF_ISP_MODULE_AWB		(1U << 12)
+/* Image Effect */
+#define RKISP1_CIF_ISP_MODULE_IE		(1U << 13)
+/* Auto Exposure Control statistics configuration */
+#define RKISP1_CIF_ISP_MODULE_AEC		(1U << 14)
+/* Wide Dynamic Range */
+#define RKISP1_CIF_ISP_MODULE_WDR		(1U << 15)
+/* Denoise Pre-Filter */
+#define RKISP1_CIF_ISP_MODULE_DPF		(1U << 16)
+/* Denoise Pre-Filter Strength */
+#define RKISP1_CIF_ISP_MODULE_DPF_STRENGTH	(1U << 17)
+
+#define RKISP1_CIF_ISP_CTK_COEFF_MAX		0x100
+#define RKISP1_CIF_ISP_CTK_OFFSET_MAX		0x800
+
+#define RKISP1_CIF_ISP_AE_MEAN_MAX_V10		25
+#define RKISP1_CIF_ISP_AE_MEAN_MAX_V12		81
+#define RKISP1_CIF_ISP_AE_MEAN_MAX		RKISP1_CIF_ISP_AE_MEAN_MAX_V12
+
+#define RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10	16
+#define RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12	32
+#define RKISP1_CIF_ISP_HIST_BIN_N_MAX		RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12
+
+#define RKISP1_CIF_ISP_AFM_MAX_WINDOWS		3
+#define RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE	17
+
+#define RKISP1_CIF_ISP_BDM_MAX_TH		0xff
+
+/*
+ * Black level compensation
+ */
+/* maximum value for horizontal start address */
+#define RKISP1_CIF_ISP_BLS_START_H_MAX		0x00000fff
+/* maximum value for horizontal stop address */
+#define RKISP1_CIF_ISP_BLS_STOP_H_MAX		0x00000fff
+/* maximum value for vertical start address */
+#define RKISP1_CIF_ISP_BLS_START_V_MAX		0x00000fff
+/* maximum value for vertical stop address */
+#define RKISP1_CIF_ISP_BLS_STOP_V_MAX		0x00000fff
+/* maximum is 2^18 = 262144 */
+#define RKISP1_CIF_ISP_BLS_SAMPLES_MAX		0x00000012
+/* maximum value for fixed black level */
+#define RKISP1_CIF_ISP_BLS_FIX_SUB_MAX		0x00000fff
+/* minimum value for fixed
black level */ +#define RKISP1_CIF_ISP_BLS_FIX_SUB_MIN 0xfffff000 +/* 13 bit range (signed)*/ +#define RKISP1_CIF_ISP_BLS_FIX_MASK 0x00001fff + +/* + * Automatic white balance measurements + */ +#define RKISP1_CIF_ISP_AWB_MAX_GRID 1 +#define RKISP1_CIF_ISP_AWB_MAX_FRAMES 7 + +/* + * Gamma out + */ +/* Maximum number of color samples supported */ +#define RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10 17 +#define RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12 34 +#define RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12 + +/* + * Lens shade correction + */ +#define RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE 8 + +/* + * The following matches the tuning process, + * not the max capabilities of the chip. + */ +#define RKISP1_CIF_ISP_LSC_SAMPLES_MAX 17 + +/* + * Histogram calculation + */ +#define RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V10 25 +#define RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V12 81 +#define RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V12 + +/* + * Defect Pixel Cluster Correction + */ +#define RKISP1_CIF_ISP_DPCC_METHODS_MAX 3 + +#define RKISP1_CIF_ISP_DPCC_MODE_STAGE1_ENABLE (1U << 2) + +#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_INCL_G_CENTER (1U << 0) +#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_INCL_RB_CENTER (1U << 1) +#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_G_3X3 (1U << 2) +#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_RB_3X3 (1U << 3) + +/* 0-2 for sets 1-3 */ +#define RKISP1_CIF_ISP_DPCC_SET_USE_STAGE1_USE_SET(n) ((n) << 0) +#define RKISP1_CIF_ISP_DPCC_SET_USE_STAGE1_USE_FIX_SET (1U << 3) + +#define RKISP1_CIF_ISP_DPCC_METHODS_SET_PG_GREEN_ENABLE (1U << 0) +#define RKISP1_CIF_ISP_DPCC_METHODS_SET_LC_GREEN_ENABLE (1U << 1) +#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RO_GREEN_ENABLE (1U << 2) +#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RND_GREEN_ENABLE (1U << 3) +#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RG_GREEN_ENABLE (1U << 4) +#define RKISP1_CIF_ISP_DPCC_METHODS_SET_PG_RED_BLUE_ENABLE (1U << 8) +#define RKISP1_CIF_ISP_DPCC_METHODS_SET_LC_RED_BLUE_ENABLE (1U << 9) +#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RO_RED_BLUE_ENABLE (1U << 10) +#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RND_RED_BLUE_ENABLE (1U << 11) +#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RG_RED_BLUE_ENABLE (1U << 12) + +#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_G(v) ((v) << 0) +#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_RB(v) ((v) << 8) +#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_G(v) ((v) << 0) +#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_RB(v) ((v) << 8) +#define RKISP1_CIF_ISP_DPCC_PG_FAC_G(v) ((v) << 0) +#define RKISP1_CIF_ISP_DPCC_PG_FAC_RB(v) ((v) << 8) +#define RKISP1_CIF_ISP_DPCC_RND_THRESH_G(v) ((v) << 0) +#define RKISP1_CIF_ISP_DPCC_RND_THRESH_RB(v) ((v) << 8) +#define RKISP1_CIF_ISP_DPCC_RG_FAC_G(v) ((v) << 0) +#define RKISP1_CIF_ISP_DPCC_RG_FAC_RB(v) ((v) << 8) + +#define RKISP1_CIF_ISP_DPCC_RO_LIMITS_n_G(n, v) ((v) << ((n) * 4)) +#define RKISP1_CIF_ISP_DPCC_RO_LIMITS_n_RB(n, v) ((v) << ((n) * 4 + 2)) + +#define RKISP1_CIF_ISP_DPCC_RND_OFFS_n_G(n, v) ((v) << ((n) * 4)) +#define RKISP1_CIF_ISP_DPCC_RND_OFFS_n_RB(n, v) ((v) << ((n) * 4 + 2)) + +/* + * Denoising pre filter + */ +#define RKISP1_CIF_ISP_DPF_MAX_NLF_COEFFS 17 +#define RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS 6 + +/* + * Measurement types + */ +#define RKISP1_CIF_ISP_STAT_AWB (1U << 0) +#define RKISP1_CIF_ISP_STAT_AUTOEXP (1U << 1) +#define RKISP1_CIF_ISP_STAT_AFM (1U << 2) +#define RKISP1_CIF_ISP_STAT_HIST (1U << 3) + +/** + * enum rkisp1_cif_isp_version - ISP variants + * + * @RKISP1_V10: 
Used at least in RK3288 and RK3399. + * @RKISP1_V11: Declared in the original vendor code, but not used. Same number + * of entries in grids and histogram as v10. + * @RKISP1_V12: Used at least in RK3326 and PX30. + * @RKISP1_V13: Used at least in RK1808. Same number of entries in grids and + * histogram as v12. + * @RKISP1_V_IMX8MP: Used in at least i.MX8MP. Same number of entries in grids + * and histogram as v10. + */ +enum rkisp1_cif_isp_version { + RKISP1_V10 = 10, + RKISP1_V11, + RKISP1_V12, + RKISP1_V13, + RKISP1_V_IMX8MP, +}; + +enum rkisp1_cif_isp_histogram_mode { + RKISP1_CIF_ISP_HISTOGRAM_MODE_DISABLE, + RKISP1_CIF_ISP_HISTOGRAM_MODE_RGB_COMBINED, + RKISP1_CIF_ISP_HISTOGRAM_MODE_R_HISTOGRAM, + RKISP1_CIF_ISP_HISTOGRAM_MODE_G_HISTOGRAM, + RKISP1_CIF_ISP_HISTOGRAM_MODE_B_HISTOGRAM, + RKISP1_CIF_ISP_HISTOGRAM_MODE_Y_HISTOGRAM +}; + +enum rkisp1_cif_isp_awb_mode_type { + RKISP1_CIF_ISP_AWB_MODE_MANUAL, + RKISP1_CIF_ISP_AWB_MODE_RGB, + RKISP1_CIF_ISP_AWB_MODE_YCBCR +}; + +enum rkisp1_cif_isp_flt_mode { + RKISP1_CIF_ISP_FLT_STATIC_MODE, + RKISP1_CIF_ISP_FLT_DYNAMIC_MODE +}; + +/** + * enum rkisp1_cif_isp_exp_ctrl_autostop - stop modes + * @RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_0: continuous measurement + * @RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_1: stop measuring after a complete frame + */ +enum rkisp1_cif_isp_exp_ctrl_autostop { + RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_0 = 0, + RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_1 = 1, +}; + +/** + * enum rkisp1_cif_isp_exp_meas_mode - Exposure measure mode + * @RKISP1_CIF_ISP_EXP_MEASURING_MODE_0: Y = 16 + 0.25R + 0.5G + 0.1094B + * @RKISP1_CIF_ISP_EXP_MEASURING_MODE_1: Y = (R + G + B) x (85/256) + */ +enum rkisp1_cif_isp_exp_meas_mode { + RKISP1_CIF_ISP_EXP_MEASURING_MODE_0, + RKISP1_CIF_ISP_EXP_MEASURING_MODE_1, +}; + +/*---------- PART1: Input Parameters ------------*/ + +/** + * struct rkisp1_cif_isp_window - measurement window. + * + * Measurements are calculated per window inside the frame. + * This struct represents a window for a measurement. + * + * @h_offs: the horizontal offset of the window from the left of the frame in pixels. + * @v_offs: the vertical offset of the window from the top of the frame in pixels. + * @h_size: the horizontal size of the window in pixels + * @v_size: the vertical size of the window in pixels. + */ +struct rkisp1_cif_isp_window { + __u16 h_offs; + __u16 v_offs; + __u16 h_size; + __u16 v_size; +}; + +/** + * struct rkisp1_cif_isp_bls_fixed_val - BLS fixed subtraction values + * + * The values will be subtracted from the sensor + * values. Therefore a negative value means addition instead of subtraction! + * + * @r: Fixed (signed!) subtraction value for Bayer pattern R + * @gr: Fixed (signed!) subtraction value for Bayer pattern Gr + * @gb: Fixed (signed!) subtraction value for Bayer pattern Gb + * @b: Fixed (signed!) subtraction value for Bayer pattern B + */ +struct rkisp1_cif_isp_bls_fixed_val { + __s16 r; + __s16 gr; + __s16 gb; + __s16 b; +}; + +/** + * struct rkisp1_cif_isp_bls_config - Configuration used by black level subtraction + * + * @enable_auto: Automatic mode activated means that the measured values + * are subtracted. Otherwise the fixed subtraction + * values will be subtracted. + * @en_windows: enabled window + * @bls_window1: Measurement window 1 size + * @bls_window2: Measurement window 2 size + * @bls_samples: Set amount of measured pixels for each Bayer position + * (A, B,C and D) to 2^bls_samples. 
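+ *	(e.g. a hypothetical bls_samples = 5 makes the hardware average
+ *	2^5 = 32 pixels per Bayer position)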
+ * @fixed_val: Fixed subtraction values + */ +struct rkisp1_cif_isp_bls_config { + __u8 enable_auto; + __u8 en_windows; + struct rkisp1_cif_isp_window bls_window1; + struct rkisp1_cif_isp_window bls_window2; + __u8 bls_samples; + struct rkisp1_cif_isp_bls_fixed_val fixed_val; +}; + +/** + * struct rkisp1_cif_isp_dpcc_methods_config - DPCC methods set configuration + * + * This structure stores the configuration of one set of methods for the DPCC + * algorithm. Multiple methods can be selected in each set (independently for + * the Green and Red/Blue components) through the @method field, the result is + * the logical AND of all enabled methods. The remaining fields set thresholds + * and factors for each method. + * + * @method: Method enable bits (RKISP1_CIF_ISP_DPCC_METHODS_SET_*) + * @line_thresh: Line threshold (RKISP1_CIF_ISP_DPCC_LINE_THRESH_*) + * @line_mad_fac: Line Mean Absolute Difference factor (RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_*) + * @pg_fac: Peak gradient factor (RKISP1_CIF_ISP_DPCC_PG_FAC_*) + * @rnd_thresh: Rank Neighbor Difference threshold (RKISP1_CIF_ISP_DPCC_RND_THRESH_*) + * @rg_fac: Rank gradient factor (RKISP1_CIF_ISP_DPCC_RG_FAC_*) + */ +struct rkisp1_cif_isp_dpcc_methods_config { + __u32 method; + __u32 line_thresh; + __u32 line_mad_fac; + __u32 pg_fac; + __u32 rnd_thresh; + __u32 rg_fac; +}; + +/** + * struct rkisp1_cif_isp_dpcc_config - Configuration used by DPCC + * + * Configuration used by Defect Pixel Cluster Correction. Three sets of methods + * can be configured and selected through the @set_use field. The result is the + * logical OR of all enabled sets. + * + * @mode: DPCC mode (RKISP1_CIF_ISP_DPCC_MODE_*) + * @output_mode: Interpolation output mode (RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_*) + * @set_use: Methods sets selection (RKISP1_CIF_ISP_DPCC_SET_USE_*) + * @methods: Methods sets configuration + * @ro_limits: Rank order limits (RKISP1_CIF_ISP_DPCC_RO_LIMITS_*) + * @rnd_offs: Differential rank offsets for rank neighbor difference (RKISP1_CIF_ISP_DPCC_RND_OFFS_*) + */ +struct rkisp1_cif_isp_dpcc_config { + __u32 mode; + __u32 output_mode; + __u32 set_use; + struct rkisp1_cif_isp_dpcc_methods_config methods[RKISP1_CIF_ISP_DPCC_METHODS_MAX]; + __u32 ro_limits; + __u32 rnd_offs; +}; + +/** + * struct rkisp1_cif_isp_gamma_corr_curve - gamma curve point definition y-axis (output). + * + * The reset values define a linear curve which has the same effect as bypass. Reset values are: + * gamma_y[0] = 0x0000, gamma_y[1] = 0x0100, ... gamma_y[15] = 0x0f00, gamma_y[16] = 0xfff + * + * @gamma_y: the values for the y-axis of gamma curve points. Each value is 12 bit. + */ +struct rkisp1_cif_isp_gamma_corr_curve { + __u16 gamma_y[RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE]; +}; + +/** + * struct rkisp1_cif_isp_gamma_curve_x_axis_pnts - De-Gamma Curve definition x increments + * (sampling points). gamma_dx0 is for the lower samples (1-8), gamma_dx1 is for the + * higher samples (9-16). The reset values for both fields is 0x44444444. This means + * that each sample is 4 units away from the previous one on the x-axis. + * + * @gamma_dx0: gamma curve sample points definitions. Bits 0:2 for sample 1. Bit 3 unused. + * Bits 4:6 for sample 2. bit 7 unused ... Bits 28:30 for sample 8. Bit 31 unused + * @gamma_dx1: gamma curve sample points definitions. Bits 0:2 for sample 9. Bit 3 unused. + * Bits 4:6 for sample 10. bit 7 unused ... Bits 28:30 for sample 16. 
Bit 31 unused + */ +struct rkisp1_cif_isp_gamma_curve_x_axis_pnts { + __u32 gamma_dx0; + __u32 gamma_dx1; +}; + +/** + * struct rkisp1_cif_isp_sdg_config - Configuration used by sensor degamma + * + * @curve_r: gamma curve point definition axis for red + * @curve_g: gamma curve point definition axis for green + * @curve_b: gamma curve point definition axis for blue + * @xa_pnts: x axis increments + */ +struct rkisp1_cif_isp_sdg_config { + struct rkisp1_cif_isp_gamma_corr_curve curve_r; + struct rkisp1_cif_isp_gamma_corr_curve curve_g; + struct rkisp1_cif_isp_gamma_corr_curve curve_b; + struct rkisp1_cif_isp_gamma_curve_x_axis_pnts xa_pnts; +}; + +/** + * struct rkisp1_cif_isp_lsc_config - Configuration used by Lens shading correction + * + * @r_data_tbl: sample table red + * @gr_data_tbl: sample table green (red) + * @gb_data_tbl: sample table green (blue) + * @b_data_tbl: sample table blue + * @x_grad_tbl: gradient table x + * @y_grad_tbl: gradient table y + * @x_size_tbl: size table x + * @y_size_tbl: size table y + * @config_width: not used at the moment + * @config_height: not used at the moment + */ +struct rkisp1_cif_isp_lsc_config { + __u16 r_data_tbl[RKISP1_CIF_ISP_LSC_SAMPLES_MAX][RKISP1_CIF_ISP_LSC_SAMPLES_MAX]; + __u16 gr_data_tbl[RKISP1_CIF_ISP_LSC_SAMPLES_MAX][RKISP1_CIF_ISP_LSC_SAMPLES_MAX]; + __u16 gb_data_tbl[RKISP1_CIF_ISP_LSC_SAMPLES_MAX][RKISP1_CIF_ISP_LSC_SAMPLES_MAX]; + __u16 b_data_tbl[RKISP1_CIF_ISP_LSC_SAMPLES_MAX][RKISP1_CIF_ISP_LSC_SAMPLES_MAX]; + + __u16 x_grad_tbl[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE]; + __u16 y_grad_tbl[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE]; + + __u16 x_size_tbl[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE]; + __u16 y_size_tbl[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE]; + __u16 config_width; + __u16 config_height; +}; + +/** + * struct rkisp1_cif_isp_ie_config - Configuration used by image effects + * + * @effect: values from 'enum v4l2_colorfx'. Possible values are: V4L2_COLORFX_SEPIA, + * V4L2_COLORFX_SET_CBCR, V4L2_COLORFX_AQUA, V4L2_COLORFX_EMBOSS, + * V4L2_COLORFX_SKETCH, V4L2_COLORFX_BW, V4L2_COLORFX_NEGATIVE + * @color_sel: bits 0:2 - colors bitmask (001 - blue, 010 - green, 100 - red). + * bits 8:15 - Threshold value of the RGB colors for the color selection effect. + * @eff_mat_1: 3x3 Matrix Coefficients for Emboss Effect 1 + * @eff_mat_2: 3x3 Matrix Coefficients for Emboss Effect 2 + * @eff_mat_3: 3x3 Matrix Coefficients for Emboss 3/Sketch 1 + * @eff_mat_4: 3x3 Matrix Coefficients for Sketch Effect 2 + * @eff_mat_5: 3x3 Matrix Coefficients for Sketch Effect 3 + * @eff_tint: Chrominance increment values of tint (used for sepia effect) + */ +struct rkisp1_cif_isp_ie_config { + __u16 effect; + __u16 color_sel; + __u16 eff_mat_1; + __u16 eff_mat_2; + __u16 eff_mat_3; + __u16 eff_mat_4; + __u16 eff_mat_5; + __u16 eff_tint; +}; + +/** + * struct rkisp1_cif_isp_cproc_config - Configuration used by Color Processing + * + * @c_out_range: Chrominance pixel clipping range at output. + * (0 for limit, 1 for full) + * @y_in_range: Luminance pixel clipping range at output. + * @y_out_range: Luminance pixel clipping range at output. + * @contrast: 00~ff, 0.0~1.992 + * @brightness: 80~7F, -128~+127 + * @sat: saturation, 00~FF, 0.0~1.992 + * @hue: 80~7F, -90~+87.188 + */ +struct rkisp1_cif_isp_cproc_config { + __u8 c_out_range; + __u8 y_in_range; + __u8 y_out_range; + __u8 contrast; + __u8 brightness; + __u8 sat; + __u8 hue; +}; + +/** + * struct rkisp1_cif_isp_awb_meas_config - Configuration for the AWB statistics + * + * @awb_mode: the awb meas mode. 
From enum rkisp1_cif_isp_awb_mode_type. + * @awb_wnd: white balance measurement window (in pixels) + * @max_y: only pixels values < max_y contribute to awb measurement, set to 0 + * to disable this feature + * @min_y: only pixels values > min_y contribute to awb measurement + * @max_csum: Chrominance sum maximum value, only consider pixels with Cb+Cr, + * smaller than threshold for awb measurements + * @min_c: Chrominance minimum value, only consider pixels with Cb/Cr + * each greater than threshold value for awb measurements + * @frames: number of frames - 1 used for mean value calculation + * (ucFrames=0 means 1 Frame) + * @awb_ref_cr: reference Cr value for AWB regulation, target for AWB + * @awb_ref_cb: reference Cb value for AWB regulation, target for AWB + * @enable_ymax_cmp: enable Y_MAX compare (Not valid in RGB measurement mode.) + */ +struct rkisp1_cif_isp_awb_meas_config { + /* + * Note: currently the h and v offsets are mapped to grid offsets + */ + struct rkisp1_cif_isp_window awb_wnd; + __u32 awb_mode; + __u8 max_y; + __u8 min_y; + __u8 max_csum; + __u8 min_c; + __u8 frames; + __u8 awb_ref_cr; + __u8 awb_ref_cb; + __u8 enable_ymax_cmp; +}; + +/** + * struct rkisp1_cif_isp_awb_gain_config - Configuration used by auto white balance gain + * + * All fields in this struct are 10 bit, where: + * 0x100h = 1, unsigned integer value, range 0 to 4 with 8 bit fractional part. + * + * out_data_x = ( AWB_GAIN_X * in_data + 128) >> 8 + * + * @gain_red: gain value for red component. + * @gain_green_r: gain value for green component in red line. + * @gain_blue: gain value for blue component. + * @gain_green_b: gain value for green component in blue line. + */ +struct rkisp1_cif_isp_awb_gain_config { + __u16 gain_red; + __u16 gain_green_r; + __u16 gain_blue; + __u16 gain_green_b; +}; + +/** + * struct rkisp1_cif_isp_flt_config - Configuration used by ISP filtering + * + * All 4 threshold fields (thresh_*) are 10 bits. + * All 6 factor fields (fac_*) are 6 bits. + * + * @mode: ISP_FILT_MODE register fields (from enum rkisp1_cif_isp_flt_mode) + * @grn_stage1: Green filter stage 1 select (range 0x0...0x8) + * @chr_h_mode: Chroma filter horizontal mode + * @chr_v_mode: Chroma filter vertical mode + * @thresh_bl0: If thresh_bl1 < sum_grad < thresh_bl0 then fac_bl0 is selected (blurring th) + * @thresh_bl1: If sum_grad < thresh_bl1 then fac_bl1 is selected (blurring th) + * @thresh_sh0: If thresh_sh0 < sum_grad < thresh_sh1 then thresh_sh0 is selected (sharpening th) + * @thresh_sh1: If thresh_sh1 < sum_grad then thresh_sh1 is selected (sharpening th) + * @lum_weight: Parameters for luminance weight function. 
+ * @fac_sh1: filter factor for sharp1 level + * @fac_sh0: filter factor for sharp0 level + * @fac_mid: filter factor for mid level and for static filter mode + * @fac_bl0: filter factor for blur 0 level + * @fac_bl1: filter factor for blur 1 level (max blur) + */ +struct rkisp1_cif_isp_flt_config { + __u32 mode; + __u8 grn_stage1; + __u8 chr_h_mode; + __u8 chr_v_mode; + __u32 thresh_bl0; + __u32 thresh_bl1; + __u32 thresh_sh0; + __u32 thresh_sh1; + __u32 lum_weight; + __u32 fac_sh1; + __u32 fac_sh0; + __u32 fac_mid; + __u32 fac_bl0; + __u32 fac_bl1; +}; + +/** + * struct rkisp1_cif_isp_bdm_config - Configuration used by Bayer DeMosaic + * + * @demosaic_th: threshold for bayer demosaicing texture detection + */ +struct rkisp1_cif_isp_bdm_config { + __u8 demosaic_th; +}; + +/** + * struct rkisp1_cif_isp_ctk_config - Configuration used by Cross Talk correction + * + * @coeff: color correction matrix. Values are 11-bit signed fixed-point numbers with 4 bit integer + * and 7 bit fractional part, ranging from -8 (0x400) to +7.992 (0x3FF). 0 is + * represented by 0x000 and a coefficient value of 1 as 0x080. + * @ct_offset: Red, Green, Blue offsets for the crosstalk correction matrix + */ +struct rkisp1_cif_isp_ctk_config { + __u16 coeff[3][3]; + __u16 ct_offset[3]; +}; + +enum rkisp1_cif_isp_goc_mode { + RKISP1_CIF_ISP_GOC_MODE_LOGARITHMIC, + RKISP1_CIF_ISP_GOC_MODE_EQUIDISTANT +}; + +/** + * struct rkisp1_cif_isp_goc_config - Configuration used by Gamma Out correction + * + * @mode: goc mode (from enum rkisp1_cif_isp_goc_mode) + * @gamma_y: gamma out curve y-axis for all color components + * + * The number of entries of @gamma_y depends on the hardware revision + * as is reported by the hw_revision field of the struct media_device_info + * that is returned by ioctl MEDIA_IOC_DEVICE_INFO. + * + * V10 has RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10 entries, V12 has + * RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12 entries. + * RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES is equal to the maximum of the two. + */ +struct rkisp1_cif_isp_goc_config { + __u32 mode; + __u16 gamma_y[RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES]; +}; + +/** + * struct rkisp1_cif_isp_hst_config - Configuration for Histogram statistics + * + * @mode: histogram mode (from enum rkisp1_cif_isp_histogram_mode) + * @histogram_predivider: process every stepsize pixel, all other pixels are + * skipped + * @meas_window: coordinates of the measure window + * @hist_weight: weighting factor for sub-windows + * + * The number of entries of @hist_weight depends on the hardware revision + * as is reported by the hw_revision field of the struct media_device_info + * that is returned by ioctl MEDIA_IOC_DEVICE_INFO. + * + * V10 has RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V10 entries, V12 has + * RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V12 entries. + * RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE is equal to the maximum of the + * two. 
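+ *
+ * An application should therefore fill only the first _V10 or _V12
+ * entries of @hist_weight, depending on the reported hw_revision,
+ * even though the array is sized for the maximum.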
+ */ +struct rkisp1_cif_isp_hst_config { + __u32 mode; + __u8 histogram_predivider; + struct rkisp1_cif_isp_window meas_window; + __u8 hist_weight[RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE]; +}; + +/** + * struct rkisp1_cif_isp_aec_config - Configuration for Auto Exposure statistics + * + * @mode: Exposure measure mode (from enum rkisp1_cif_isp_exp_meas_mode) + * @autostop: stop mode (from enum rkisp1_cif_isp_exp_ctrl_autostop) + * @meas_window: coordinates of the measure window + */ +struct rkisp1_cif_isp_aec_config { + __u32 mode; + __u32 autostop; + struct rkisp1_cif_isp_window meas_window; +}; + +/** + * struct rkisp1_cif_isp_afc_config - Configuration for the Auto Focus statistics + * + * @num_afm_win: max RKISP1_CIF_ISP_AFM_MAX_WINDOWS + * @afm_win: coordinates of the meas window + * @thres: threshold used for minimizing the influence of noise + * @var_shift: the number of bits for the shift operation at the end of the + * calculation chain. + */ +struct rkisp1_cif_isp_afc_config { + __u8 num_afm_win; + struct rkisp1_cif_isp_window afm_win[RKISP1_CIF_ISP_AFM_MAX_WINDOWS]; + __u32 thres; + __u32 var_shift; +}; + +/** + * enum rkisp1_cif_isp_dpf_gain_usage - dpf gain usage + * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_DISABLED: don't use any gains in preprocessing stage + * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_NF_GAINS: use only the noise function gains from + * registers DPF_NF_GAIN_R, ... + * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_LSC_GAINS: use only the gains from LSC module + * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_NF_LSC_GAINS: use the noise function gains and the + * gains from LSC module + * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_GAINS: use only the gains from AWB module + * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_LSC_GAINS: use the gains from AWB and LSC module + * @RKISP1_CIF_ISP_DPF_GAIN_USAGE_MAX: upper border (only for an internal evaluation) + */ +enum rkisp1_cif_isp_dpf_gain_usage { + RKISP1_CIF_ISP_DPF_GAIN_USAGE_DISABLED, + RKISP1_CIF_ISP_DPF_GAIN_USAGE_NF_GAINS, + RKISP1_CIF_ISP_DPF_GAIN_USAGE_LSC_GAINS, + RKISP1_CIF_ISP_DPF_GAIN_USAGE_NF_LSC_GAINS, + RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_GAINS, + RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_LSC_GAINS, + RKISP1_CIF_ISP_DPF_GAIN_USAGE_MAX +}; + +/** + * enum rkisp1_cif_isp_dpf_rb_filtersize - Red and blue filter sizes + * @RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_13x9: red and blue filter kernel size 13x9 + * (means 7x5 active pixel) + * @RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_9x9: red and blue filter kernel size 9x9 + * (means 5x5 active pixel) + */ +enum rkisp1_cif_isp_dpf_rb_filtersize { + RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_13x9, + RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_9x9, +}; + +/** + * enum rkisp1_cif_isp_dpf_nll_scale_mode - dpf noise level scale mode + * @RKISP1_CIF_ISP_NLL_SCALE_LINEAR: use a linear scaling + * @RKISP1_CIF_ISP_NLL_SCALE_LOGARITHMIC: use a logarithmic scaling + */ +enum rkisp1_cif_isp_dpf_nll_scale_mode { + RKISP1_CIF_ISP_NLL_SCALE_LINEAR, + RKISP1_CIF_ISP_NLL_SCALE_LOGARITHMIC, +}; + +/** + * struct rkisp1_cif_isp_dpf_nll - Noise level lookup + * + * @coeff: Noise level Lookup coefficient + * @scale_mode: dpf noise level scale mode (from enum rkisp1_cif_isp_dpf_nll_scale_mode) + */ +struct rkisp1_cif_isp_dpf_nll { + __u16 coeff[RKISP1_CIF_ISP_DPF_MAX_NLF_COEFFS]; + __u32 scale_mode; +}; + +/** + * struct rkisp1_cif_isp_dpf_rb_flt - Red blue filter config + * + * @fltsize: The filter size for the red and blue pixels + * (from enum rkisp1_cif_isp_dpf_rb_filtersize) + * @spatial_coeff: Spatial weights + * @r_enable: enable filter processing for red pixels + * @b_enable: 
enable filter processing for blue pixels
+ */
+struct rkisp1_cif_isp_dpf_rb_flt {
+ __u32 fltsize;
+ __u8 spatial_coeff[RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS];
+ __u8 r_enable;
+ __u8 b_enable;
+};
+
+/**
+ * struct rkisp1_cif_isp_dpf_g_flt - Green filter Configuration
+ *
+ * @spatial_coeff: Spatial weights
+ * @gr_enable: enable filter processing for green pixels in green/red lines
+ * @gb_enable: enable filter processing for green pixels in green/blue lines
+ */
+struct rkisp1_cif_isp_dpf_g_flt {
+ __u8 spatial_coeff[RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS];
+ __u8 gr_enable;
+ __u8 gb_enable;
+};
+
+/**
+ * struct rkisp1_cif_isp_dpf_gain - Noise function Configuration
+ *
+ * @mode: dpf gain usage (from enum rkisp1_cif_isp_dpf_gain_usage)
+ * @nf_r_gain: Noise function Gain that replaces the AWB gain for red pixels
+ * @nf_b_gain: Noise function Gain that replaces the AWB gain for blue pixels
+ * @nf_gr_gain: Noise function Gain that replaces the AWB gain
+ * for green pixels in a red line
+ * @nf_gb_gain: Noise function Gain that replaces the AWB gain
+ * for green pixels in a blue line
+ */
+struct rkisp1_cif_isp_dpf_gain {
+ __u32 mode;
+ __u16 nf_r_gain;
+ __u16 nf_b_gain;
+ __u16 nf_gr_gain;
+ __u16 nf_gb_gain;
+};
+
+/**
+ * struct rkisp1_cif_isp_dpf_config - Configuration used by De-noising pre-filter
+ *
+ * @gain: noise function gain
+ * @g_flt: green filter config
+ * @rb_flt: red blue filter config
+ * @nll: noise level lookup
+ */
+struct rkisp1_cif_isp_dpf_config {
+ struct rkisp1_cif_isp_dpf_gain gain;
+ struct rkisp1_cif_isp_dpf_g_flt g_flt;
+ struct rkisp1_cif_isp_dpf_rb_flt rb_flt;
+ struct rkisp1_cif_isp_dpf_nll nll;
+};
+
+/**
+ * struct rkisp1_cif_isp_dpf_strength_config - strength of the filter
+ *
+ * @r: filter strength of the RED filter
+ * @g: filter strength of the GREEN filter
+ * @b: filter strength of the BLUE filter
+ */
+struct rkisp1_cif_isp_dpf_strength_config {
+ __u8 r;
+ __u8 g;
+ __u8 b;
+};
+
+/**
+ * struct rkisp1_cif_isp_isp_other_cfg - Parameters for some blocks in rockchip isp1
+ *
+ * @dpcc_config: Defect Pixel Cluster Correction config
+ * @bls_config: Black Level Subtraction config
+ * @sdg_config: sensor degamma config
+ * @lsc_config: Lens Shade config
+ * @awb_gain_config: Auto White balance gain config
+ * @flt_config: filter config
+ * @bdm_config: demosaic config
+ * @ctk_config: cross talk config
+ * @goc_config: gamma out config
+ * @dpf_config: De-noising pre-filter config
+ * @dpf_strength_config: dpf strength config
+ * @cproc_config: color process config
+ * @ie_config: image effects config
+ */
+struct rkisp1_cif_isp_isp_other_cfg {
+ struct rkisp1_cif_isp_dpcc_config dpcc_config;
+ struct rkisp1_cif_isp_bls_config bls_config;
+ struct rkisp1_cif_isp_sdg_config sdg_config;
+ struct rkisp1_cif_isp_lsc_config lsc_config;
+ struct rkisp1_cif_isp_awb_gain_config awb_gain_config;
+ struct rkisp1_cif_isp_flt_config flt_config;
+ struct rkisp1_cif_isp_bdm_config bdm_config;
+ struct rkisp1_cif_isp_ctk_config ctk_config;
+ struct rkisp1_cif_isp_goc_config goc_config;
+ struct rkisp1_cif_isp_dpf_config dpf_config;
+ struct rkisp1_cif_isp_dpf_strength_config dpf_strength_config;
+ struct rkisp1_cif_isp_cproc_config cproc_config;
+ struct rkisp1_cif_isp_ie_config ie_config;
+};
+
+/**
+ * struct rkisp1_cif_isp_isp_meas_cfg - Rockchip ISP1 Measure Parameters
+ *
+ * @awb_meas_config: auto white balance config
+ * @hst_config: histogram config
+ * @aec_config: auto exposure config
+ * @afc_config: auto focus config
+ */
+struct rkisp1_cif_isp_isp_meas_cfg {
+ struct rkisp1_cif_isp_awb_meas_config awb_meas_config;
+ struct rkisp1_cif_isp_hst_config hst_config;
+ struct rkisp1_cif_isp_aec_config aec_config;
+ struct rkisp1_cif_isp_afc_config afc_config;
+};
+
+/**
+ * struct rkisp1_params_cfg - Rockchip ISP1 Input Parameters Meta Data
+ *
+ * @module_en_update: mask of the modules whose enable bit should be updated
+ * @module_ens: enable value for each module; a module is only updated when
+ * its corresponding bit is set in module_en_update
+ * @module_cfg_update: mask of the modules whose configuration should be updated
+ * @meas: measurement config
+ * @others: other config
+ */
+struct rkisp1_params_cfg {
+ __u32 module_en_update;
+ __u32 module_ens;
+ __u32 module_cfg_update;
+
+ struct rkisp1_cif_isp_isp_meas_cfg meas;
+ struct rkisp1_cif_isp_isp_other_cfg others;
+};
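For clarity, a minimal sketch of the update protocol described by the masks above: only modules whose bit is set in the update masks are touched, everything else is left as-is. It assumes the RKISP1_CIF_ISP_MODULE_BLS bit and struct rkisp1_cif_isp_bls_config defined earlier in this header, with the UAPI header on the include path.

#include <string.h>
#include <linux/rkisp1-config.h>

/* Sketch (not part of the header): fill a params buffer that turns the BLS
 * block on and supplies a new configuration for it. */
static void params_enable_bls(struct rkisp1_params_cfg *cfg,
                              const struct rkisp1_cif_isp_bls_config *bls)
{
    memset(cfg, 0, sizeof(*cfg));
    cfg->module_en_update = RKISP1_CIF_ISP_MODULE_BLS;  /* touch the BLS enable bit */
    cfg->module_ens = RKISP1_CIF_ISP_MODULE_BLS;        /* ...and set it to enabled */
    cfg->module_cfg_update = RKISP1_CIF_ISP_MODULE_BLS; /* apply the new config */
    cfg->others.bls_config = *bls;
}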
+/*---------- PART2: Measurement Statistics ------------*/
+
+/**
+ * struct rkisp1_cif_isp_awb_meas - AWB measured values
+ *
+ * @cnt: White pixel count, number of "white pixels" found during last
+ * measurement
+ * @mean_y_or_g: Mean value of Y within window and frames,
+ * Green if RGB is selected.
+ * @mean_cb_or_b: Mean value of Cb within window and frames,
+ * Blue if RGB is selected.
+ * @mean_cr_or_r: Mean value of Cr within window and frames,
+ * Red if RGB is selected.
+ */
+struct rkisp1_cif_isp_awb_meas {
+ __u32 cnt;
+ __u8 mean_y_or_g;
+ __u8 mean_cb_or_b;
+ __u8 mean_cr_or_r;
+};
+
+/**
+ * struct rkisp1_cif_isp_awb_stat - statistics automatic white balance data
+ *
+ * @awb_mean: Mean measured data
+ */
+struct rkisp1_cif_isp_awb_stat {
+ struct rkisp1_cif_isp_awb_meas awb_mean[RKISP1_CIF_ISP_AWB_MAX_GRID];
+};
+
+/**
+ * struct rkisp1_cif_isp_bls_meas_val - BLS measured values
+ *
+ * @meas_r: Mean measured value for Bayer pattern R
+ * @meas_gr: Mean measured value for Bayer pattern Gr
+ * @meas_gb: Mean measured value for Bayer pattern Gb
+ * @meas_b: Mean measured value for Bayer pattern B
+ */
+struct rkisp1_cif_isp_bls_meas_val {
+ __u16 meas_r;
+ __u16 meas_gr;
+ __u16 meas_gb;
+ __u16 meas_b;
+};
+
+/**
+ * struct rkisp1_cif_isp_ae_stat - statistics auto exposure data
+ *
+ * @exp_mean: Mean luminance value of block xx
+ * @bls_val: BLS measured values
+ *
+ * The number of entries of @exp_mean depends on the hardware revision
+ * as is reported by the hw_revision field of the struct media_device_info
+ * that is returned by ioctl MEDIA_IOC_DEVICE_INFO.
+ *
+ * V10 has RKISP1_CIF_ISP_AE_MEAN_MAX_V10 entries, V12 has
+ * RKISP1_CIF_ISP_AE_MEAN_MAX_V12 entries. RKISP1_CIF_ISP_AE_MEAN_MAX is equal
+ * to the maximum of the two.
+ *
+ * Image is divided into 5x5 blocks on V10 and 9x9 blocks on V12.
+ */
+struct rkisp1_cif_isp_ae_stat {
+ __u8 exp_mean[RKISP1_CIF_ISP_AE_MEAN_MAX];
+ struct rkisp1_cif_isp_bls_meas_val bls_val;
+};
+
+/**
+ * struct rkisp1_cif_isp_af_meas_val - AF measured values
+ *
+ * @sum: sharpness value
+ * @lum: luminance value
+ */
+struct rkisp1_cif_isp_af_meas_val {
+ __u32 sum;
+ __u32 lum;
+};
+
+/**
+ * struct rkisp1_cif_isp_af_stat - statistics auto focus data
+ *
+ * @window: AF measured value of window x
+ *
+ * The module measures the sharpness in 3 windows of selectable size via
+ * register settings (ISP_AFM_*_A/B/C)
+ */
+struct rkisp1_cif_isp_af_stat {
+ struct rkisp1_cif_isp_af_meas_val window[RKISP1_CIF_ISP_AFM_MAX_WINDOWS];
+};
+
+/**
+ * struct rkisp1_cif_isp_hist_stat - statistics histogram data
+ *
+ * @hist_bins: measured bin counters. Each bin is a 20-bit unsigned fixed point
+ * type. Bits 0-4 are the fractional part and bits 5-19 are the
+ * integer part.
+ *
+ * The window of the measurements area is divided into 5x5 sub-windows for
+ * V10 and into 9x9 sub-windows for V12. The histogram is then computed for each
+ * sub-window independently and the final result is a weighted average of the
+ * histogram measurements on all sub-windows. The window of the measurements
+ * area and the weight of each sub-window are configurable using
+ * struct @rkisp1_cif_isp_hst_config.
+ *
+ * The histogram contains 16 bins in V10 and 32 bins in V12.
+ *
+ * The number of entries of @hist_bins depends on the hardware revision
+ * as is reported by the hw_revision field of the struct media_device_info
+ * that is returned by ioctl MEDIA_IOC_DEVICE_INFO.
+ *
+ * V10 has RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10 entries, V12 has
+ * RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12 entries. RKISP1_CIF_ISP_HIST_BIN_N_MAX is
+ * equal to the maximum of the two.
+ */
+struct rkisp1_cif_isp_hist_stat {
+ __u32 hist_bins[RKISP1_CIF_ISP_HIST_BIN_N_MAX];
+};
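As a worked example of the fixed-point layout documented above, a bin value converts to a plain count by dividing out the five fractional bits (2^5 = 32 steps); a minimal sketch:

#include <linux/types.h>

/* Sketch (not part of the header): decode one 20-bit histogram bin,
 * 5 fractional bits (0-4) and 15 integer bits (5-19). */
static double hist_bin_to_double(__u32 bin)
{
    return (double)(bin & 0xfffff) / 32.0;
}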
+/**
+ * struct rkisp1_cif_isp_stat - Rockchip ISP1 Statistics Data
+ *
+ * @awb: statistics data for automatic white balance
+ * @ae: statistics data for auto exposure
+ * @af: statistics data for auto focus
+ * @hist: statistics histogram data
+ */
+struct rkisp1_cif_isp_stat {
+ struct rkisp1_cif_isp_awb_stat awb;
+ struct rkisp1_cif_isp_ae_stat ae;
+ struct rkisp1_cif_isp_af_stat af;
+ struct rkisp1_cif_isp_hist_stat hist;
+};
+
+/**
+ * struct rkisp1_stat_buffer - Rockchip ISP1 Statistics Meta Data
+ *
+ * @meas_type: measurement types (RKISP1_CIF_ISP_STAT_* definitions)
+ * @frame_id: frame ID for sync
+ * @params: statistics data
+ */
+struct rkisp1_stat_buffer {
+ __u32 meas_type;
+ __u32 frame_id;
+ struct rkisp1_cif_isp_stat params;
+};
+
+#endif /* _RKISP1_CONFIG_H */
diff --git a/spider-cam/libcamera/include/linux/udmabuf.h b/spider-cam/libcamera/include/linux/udmabuf.h
new file mode 100644
index 0000000..76cc7de
--- /dev/null
+++ b/spider-cam/libcamera/include/linux/udmabuf.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _LINUX_UDMABUF_H
+#define _LINUX_UDMABUF_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define UDMABUF_FLAGS_CLOEXEC 0x01
+
+struct udmabuf_create {
+ __u32 memfd;
+ __u32 flags;
+ __u64 offset;
+ __u64 size;
+};
+
+struct udmabuf_create_item {
+ __u32 memfd;
+ __u32 __pad;
+ __u64 offset;
+ __u64 size;
+};
+
+struct udmabuf_create_list {
+ __u32 flags;
+ __u32 count;
+ struct udmabuf_create_item list[];
+};
+
+#define UDMABUF_CREATE _IOW('u', 0x42, struct udmabuf_create)
+#define UDMABUF_CREATE_LIST _IOW('u', 0x43, struct udmabuf_create_list)
+
+#endif /* _LINUX_UDMABUF_H */
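For reference, UDMABUF_CREATE is issued against the udmabuf character device; a minimal sketch, assuming /dev/udmabuf exists and memfd refers to a memfd sealed with F_SEAL_SHRINK:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/udmabuf.h>

/* Sketch (not part of the header): wrap a page-aligned region of a sealed
 * memfd into a dma-buf. Returns the dma-buf fd, or -1 on error. */
static int memfd_to_dmabuf(int memfd, __u64 offset, __u64 size)
{
    struct udmabuf_create create;
    int devfd, buffd;

    devfd = open("/dev/udmabuf", O_RDWR);
    if (devfd < 0)
        return -1;

    memset(&create, 0, sizeof(create));
    create.memfd = memfd;
    create.flags = UDMABUF_FLAGS_CLOEXEC;
    create.offset = offset; /* must be page-aligned */
    create.size = size;     /* must be page-aligned */

    buffd = ioctl(devfd, UDMABUF_CREATE, &create);
    close(devfd);
    return buffd;
}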
diff --git a/spider-cam/libcamera/include/linux/v4l2-common.h b/spider-cam/libcamera/include/linux/v4l2-common.h
new file mode 100644
index 0000000..c3ca11e
--- /dev/null
+++ b/spider-cam/libcamera/include/linux/v4l2-common.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: ((GPL-2.0+ WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * include/linux/v4l2-common.h
+ *
+ * Common V4L2 and V4L2 subdev definitions.
+ *
+ * Users are advised to #include this file either through videodev2.h
+ * (V4L2) or through v4l2-subdev.h (V4L2 subdev) rather than to refer
+ * to this file directly.
+ *
+ * Copyright (C) 2012 Nokia Corporation
+ * Contact: Sakari Ailus
+ */
+
+#ifndef __V4L2_COMMON__
+#define __V4L2_COMMON__
+
+#include <linux/types.h>
+
+/*
+ *
+ * Selection interface definitions
+ *
+ */
+
+/* Current cropping area */
+#define V4L2_SEL_TGT_CROP 0x0000
+/* Default cropping area */
+#define V4L2_SEL_TGT_CROP_DEFAULT 0x0001
+/* Cropping bounds */
+#define V4L2_SEL_TGT_CROP_BOUNDS 0x0002
+/* Native frame size */
+#define V4L2_SEL_TGT_NATIVE_SIZE 0x0003
+/* Current composing area */
+#define V4L2_SEL_TGT_COMPOSE 0x0100
+/* Default composing area */
+#define V4L2_SEL_TGT_COMPOSE_DEFAULT 0x0101
+/* Composing bounds */
+#define V4L2_SEL_TGT_COMPOSE_BOUNDS 0x0102
+/* Current composing area plus all padding pixels */
+#define V4L2_SEL_TGT_COMPOSE_PADDED 0x0103
+
+/* Selection flags */
+#define V4L2_SEL_FLAG_GE (1 << 0)
+#define V4L2_SEL_FLAG_LE (1 << 1)
+#define V4L2_SEL_FLAG_KEEP_CONFIG (1 << 2)
+
+struct v4l2_edid {
+ __u32 pad;
+ __u32 start_block;
+ __u32 blocks;
+ __u32 reserved[5];
+ __u8 *edid;
+};
+
+/* Backward compatibility target definitions --- to be removed. */
+#define V4L2_SEL_TGT_CROP_ACTIVE V4L2_SEL_TGT_CROP
+#define V4L2_SEL_TGT_COMPOSE_ACTIVE V4L2_SEL_TGT_COMPOSE
+#define V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL V4L2_SEL_TGT_CROP
+#define V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL V4L2_SEL_TGT_COMPOSE
+#define V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS V4L2_SEL_TGT_CROP_BOUNDS
+#define V4L2_SUBDEV_SEL_TGT_COMPOSE_BOUNDS V4L2_SEL_TGT_COMPOSE_BOUNDS
+
+/* Backward compatibility flag definitions --- to be removed. */
+#define V4L2_SUBDEV_SEL_FLAG_SIZE_GE V4L2_SEL_FLAG_GE
+#define V4L2_SUBDEV_SEL_FLAG_SIZE_LE V4L2_SEL_FLAG_LE
+#define V4L2_SUBDEV_SEL_FLAG_KEEP_CONFIG V4L2_SEL_FLAG_KEEP_CONFIG
+
+#endif /* __V4L2_COMMON__ */
diff --git a/spider-cam/libcamera/include/linux/v4l2-controls.h b/spider-cam/libcamera/include/linux/v4l2-controls.h
new file mode 100644
index 0000000..1e6e816
--- /dev/null
+++ b/spider-cam/libcamera/include/linux/v4l2-controls.h
@@ -0,0 +1,3501 @@
+/* SPDX-License-Identifier: ((GPL-2.0+ WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * Video for Linux Two controls header file
+ *
+ * Copyright (C) 1999-2012 the contributors
+ *
+ * The contents of this header were split off from videodev2.h. All control
+ * definitions should be added to this header, which is included by
+ * videodev2.h.
+ */
+
+#ifndef __LINUX_V4L2_CONTROLS_H
+#define __LINUX_V4L2_CONTROLS_H
+
+#include <linux/const.h>
+#include <linux/types.h>
+
+/* Control classes */
+#define V4L2_CTRL_CLASS_USER 0x00980000 /* Old-style 'user' controls */
+#define V4L2_CTRL_CLASS_CODEC 0x00990000 /* Stateful codec controls */
+#define V4L2_CTRL_CLASS_CAMERA 0x009a0000 /* Camera class controls */
+#define V4L2_CTRL_CLASS_FM_TX 0x009b0000 /* FM Modulator controls */
+#define V4L2_CTRL_CLASS_FLASH 0x009c0000 /* Camera flash controls */
+#define V4L2_CTRL_CLASS_JPEG 0x009d0000 /* JPEG-compression controls */
+#define V4L2_CTRL_CLASS_IMAGE_SOURCE 0x009e0000 /* Image source controls */
+#define V4L2_CTRL_CLASS_IMAGE_PROC 0x009f0000 /* Image processing controls */
+#define V4L2_CTRL_CLASS_DV 0x00a00000 /* Digital Video controls */
+#define V4L2_CTRL_CLASS_FM_RX 0x00a10000 /* FM Receiver controls */
+#define V4L2_CTRL_CLASS_RF_TUNER 0x00a20000 /* RF tuner controls */
+#define V4L2_CTRL_CLASS_DETECT 0x00a30000 /* Detection controls */
+#define V4L2_CTRL_CLASS_CODEC_STATELESS 0x00a40000 /* Stateless codecs controls */
+#define V4L2_CTRL_CLASS_COLORIMETRY 0x00a50000 /* Colorimetry controls */
+
+/* User-class control IDs */
+
+#define V4L2_CID_BASE (V4L2_CTRL_CLASS_USER | 0x900)
+#define V4L2_CID_USER_BASE V4L2_CID_BASE
+#define V4L2_CID_USER_CLASS (V4L2_CTRL_CLASS_USER | 1)
+#define V4L2_CID_BRIGHTNESS (V4L2_CID_BASE+0)
+#define V4L2_CID_CONTRAST (V4L2_CID_BASE+1)
+#define V4L2_CID_SATURATION (V4L2_CID_BASE+2)
+#define V4L2_CID_HUE (V4L2_CID_BASE+3)
+#define V4L2_CID_AUDIO_VOLUME (V4L2_CID_BASE+5)
+#define V4L2_CID_AUDIO_BALANCE (V4L2_CID_BASE+6)
+#define V4L2_CID_AUDIO_BASS (V4L2_CID_BASE+7)
+#define V4L2_CID_AUDIO_TREBLE (V4L2_CID_BASE+8)
+#define V4L2_CID_AUDIO_MUTE (V4L2_CID_BASE+9)
+#define V4L2_CID_AUDIO_LOUDNESS (V4L2_CID_BASE+10)
+#define V4L2_CID_BLACK_LEVEL (V4L2_CID_BASE+11) /* Deprecated */
+#define V4L2_CID_AUTO_WHITE_BALANCE (V4L2_CID_BASE+12)
+#define V4L2_CID_DO_WHITE_BALANCE (V4L2_CID_BASE+13)
+#define V4L2_CID_RED_BALANCE (V4L2_CID_BASE+14)
+#define V4L2_CID_BLUE_BALANCE (V4L2_CID_BASE+15)
+#define V4L2_CID_GAMMA (V4L2_CID_BASE+16)
+#define V4L2_CID_WHITENESS (V4L2_CID_GAMMA) /* Deprecated */
+#define V4L2_CID_EXPOSURE (V4L2_CID_BASE+17)
+#define V4L2_CID_AUTOGAIN (V4L2_CID_BASE+18)
+#define V4L2_CID_GAIN (V4L2_CID_BASE+19)
+#define V4L2_CID_HFLIP (V4L2_CID_BASE+20)
+#define V4L2_CID_VFLIP (V4L2_CID_BASE+21)
+
+#define V4L2_CID_POWER_LINE_FREQUENCY (V4L2_CID_BASE+24)
+enum v4l2_power_line_frequency {
+ V4L2_CID_POWER_LINE_FREQUENCY_DISABLED = 0,
+ V4L2_CID_POWER_LINE_FREQUENCY_50HZ = 1,
+ V4L2_CID_POWER_LINE_FREQUENCY_60HZ = 2,
+ V4L2_CID_POWER_LINE_FREQUENCY_AUTO = 3,
+};
+#define V4L2_CID_HUE_AUTO (V4L2_CID_BASE+25)
+#define V4L2_CID_WHITE_BALANCE_TEMPERATURE (V4L2_CID_BASE+26)
+#define V4L2_CID_SHARPNESS (V4L2_CID_BASE+27)
+#define V4L2_CID_BACKLIGHT_COMPENSATION (V4L2_CID_BASE+28)
+#define V4L2_CID_CHROMA_AGC (V4L2_CID_BASE+29)
+#define V4L2_CID_COLOR_KILLER (V4L2_CID_BASE+30)
+#define V4L2_CID_COLORFX (V4L2_CID_BASE+31)
+enum v4l2_colorfx {
+ V4L2_COLORFX_NONE = 0,
+ V4L2_COLORFX_BW = 1,
+ V4L2_COLORFX_SEPIA = 2,
+ V4L2_COLORFX_NEGATIVE = 3,
+ V4L2_COLORFX_EMBOSS = 4,
+ V4L2_COLORFX_SKETCH = 5,
+ V4L2_COLORFX_SKY_BLUE = 6,
+ V4L2_COLORFX_GRASS_GREEN = 7,
+ V4L2_COLORFX_SKIN_WHITEN = 8,
+ V4L2_COLORFX_VIVID = 9,
+ V4L2_COLORFX_AQUA = 10,
+ V4L2_COLORFX_ART_FREEZE = 11,
+ V4L2_COLORFX_SILHOUETTE = 12,
+ V4L2_COLORFX_SOLARIZATION = 13,
+ V4L2_COLORFX_ANTIQUE = 14,
+ V4L2_COLORFX_SET_CBCR = 15,
+ V4L2_COLORFX_SET_RGB = 16,
+};
+#define
V4L2_CID_AUTOBRIGHTNESS (V4L2_CID_BASE+32) +#define V4L2_CID_BAND_STOP_FILTER (V4L2_CID_BASE+33) + +#define V4L2_CID_ROTATE (V4L2_CID_BASE+34) +#define V4L2_CID_BG_COLOR (V4L2_CID_BASE+35) + +#define V4L2_CID_CHROMA_GAIN (V4L2_CID_BASE+36) + +#define V4L2_CID_ILLUMINATORS_1 (V4L2_CID_BASE+37) +#define V4L2_CID_ILLUMINATORS_2 (V4L2_CID_BASE+38) + +#define V4L2_CID_MIN_BUFFERS_FOR_CAPTURE (V4L2_CID_BASE+39) +#define V4L2_CID_MIN_BUFFERS_FOR_OUTPUT (V4L2_CID_BASE+40) + +#define V4L2_CID_ALPHA_COMPONENT (V4L2_CID_BASE+41) +#define V4L2_CID_COLORFX_CBCR (V4L2_CID_BASE+42) +#define V4L2_CID_COLORFX_RGB (V4L2_CID_BASE+43) + +/* last CID + 1 */ +#define V4L2_CID_LASTP1 (V4L2_CID_BASE+44) + +/* USER-class private control IDs */ + +/* + * The base for the meye driver controls. This driver was removed, but + * we keep this define in case any software still uses it. + */ +#define V4L2_CID_USER_MEYE_BASE (V4L2_CID_USER_BASE + 0x1000) + +/* The base for the bttv driver controls. + * We reserve 32 controls for this driver. */ +#define V4L2_CID_USER_BTTV_BASE (V4L2_CID_USER_BASE + 0x1010) + + +/* The base for the s2255 driver controls. + * We reserve 16 controls for this driver. */ +#define V4L2_CID_USER_S2255_BASE (V4L2_CID_USER_BASE + 0x1030) + +/* + * The base for the si476x driver controls. See include/media/drv-intf/si476x.h + * for the list of controls. Total of 16 controls is reserved for this driver + */ +#define V4L2_CID_USER_SI476X_BASE (V4L2_CID_USER_BASE + 0x1040) + +/* The base for the TI VPE driver controls. Total of 16 controls is reserved for + * this driver */ +#define V4L2_CID_USER_TI_VPE_BASE (V4L2_CID_USER_BASE + 0x1050) + +/* The base for the saa7134 driver controls. + * We reserve 16 controls for this driver. */ +#define V4L2_CID_USER_SAA7134_BASE (V4L2_CID_USER_BASE + 0x1060) + +/* The base for the adv7180 driver controls. + * We reserve 16 controls for this driver. */ +#define V4L2_CID_USER_ADV7180_BASE (V4L2_CID_USER_BASE + 0x1070) + +/* The base for the tc358743 driver controls. + * We reserve 16 controls for this driver. */ +#define V4L2_CID_USER_TC358743_BASE (V4L2_CID_USER_BASE + 0x1080) + +/* The base for the max217x driver controls. + * We reserve 32 controls for this driver + */ +#define V4L2_CID_USER_MAX217X_BASE (V4L2_CID_USER_BASE + 0x1090) + +/* The base for the imx driver controls. + * We reserve 16 controls for this driver. */ +#define V4L2_CID_USER_IMX_BASE (V4L2_CID_USER_BASE + 0x10b0) + +/* + * The base for the atmel isc driver controls. + * We reserve 32 controls for this driver. + */ +#define V4L2_CID_USER_ATMEL_ISC_BASE (V4L2_CID_USER_BASE + 0x10c0) + +/* + * The base for the CODA driver controls. + * We reserve 16 controls for this driver. + */ +#define V4L2_CID_USER_CODA_BASE (V4L2_CID_USER_BASE + 0x10e0) +/* + * The base for MIPI CCS driver controls. + * We reserve 128 controls for this driver. + */ +#define V4L2_CID_USER_CCS_BASE (V4L2_CID_USER_BASE + 0x10f0) + +/* The base for the bcm2835-isp driver controls. + * We reserve 16 controls for this driver. */ +#define V4L2_CID_USER_BCM2835_ISP_BASE (V4L2_CID_USER_BASE + 0x10e0) +/* + * The base for Allegro driver controls. + * We reserve 16 controls for this driver. + */ +#define V4L2_CID_USER_ALLEGRO_BASE (V4L2_CID_USER_BASE + 0x1170) + +/* + * The base for the isl7998x driver controls. + * We reserve 16 controls for this driver. + */ +#define V4L2_CID_USER_ISL7998X_BASE (V4L2_CID_USER_BASE + 0x1180) + +/* + * The base for DW100 driver controls. + * We reserve 16 controls for this driver. 
+ */ +#define V4L2_CID_USER_DW100_BASE (V4L2_CID_USER_BASE + 0x1190) + +/* + * The base for Aspeed driver controls. + * We reserve 16 controls for this driver. + */ +#define V4L2_CID_USER_ASPEED_BASE (V4L2_CID_USER_BASE + 0x11a0) + +/* + * The base for Nuvoton NPCM driver controls. + * We reserve 16 controls for this driver. + */ +#define V4L2_CID_USER_NPCM_BASE (V4L2_CID_USER_BASE + 0x11b0) + +/* + * The base for THine THP7312 driver controls. + * We reserve 32 controls for this driver. + */ +#define V4L2_CID_USER_THP7312_BASE (V4L2_CID_USER_BASE + 0x11c0) + +/* MPEG-class control IDs */ +/* The MPEG controls are applicable to all codec controls + * and the 'MPEG' part of the define is historical */ + +#define V4L2_CID_CODEC_BASE (V4L2_CTRL_CLASS_CODEC | 0x900) +#define V4L2_CID_CODEC_CLASS (V4L2_CTRL_CLASS_CODEC | 1) + +/* MPEG streams, specific to multiplexed streams */ +#define V4L2_CID_MPEG_STREAM_TYPE (V4L2_CID_CODEC_BASE+0) +enum v4l2_mpeg_stream_type { + V4L2_MPEG_STREAM_TYPE_MPEG2_PS = 0, /* MPEG-2 program stream */ + V4L2_MPEG_STREAM_TYPE_MPEG2_TS = 1, /* MPEG-2 transport stream */ + V4L2_MPEG_STREAM_TYPE_MPEG1_SS = 2, /* MPEG-1 system stream */ + V4L2_MPEG_STREAM_TYPE_MPEG2_DVD = 3, /* MPEG-2 DVD-compatible stream */ + V4L2_MPEG_STREAM_TYPE_MPEG1_VCD = 4, /* MPEG-1 VCD-compatible stream */ + V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD = 5, /* MPEG-2 SVCD-compatible stream */ +}; +#define V4L2_CID_MPEG_STREAM_PID_PMT (V4L2_CID_CODEC_BASE+1) +#define V4L2_CID_MPEG_STREAM_PID_AUDIO (V4L2_CID_CODEC_BASE+2) +#define V4L2_CID_MPEG_STREAM_PID_VIDEO (V4L2_CID_CODEC_BASE+3) +#define V4L2_CID_MPEG_STREAM_PID_PCR (V4L2_CID_CODEC_BASE+4) +#define V4L2_CID_MPEG_STREAM_PES_ID_AUDIO (V4L2_CID_CODEC_BASE+5) +#define V4L2_CID_MPEG_STREAM_PES_ID_VIDEO (V4L2_CID_CODEC_BASE+6) +#define V4L2_CID_MPEG_STREAM_VBI_FMT (V4L2_CID_CODEC_BASE+7) +enum v4l2_mpeg_stream_vbi_fmt { + V4L2_MPEG_STREAM_VBI_FMT_NONE = 0, /* No VBI in the MPEG stream */ + V4L2_MPEG_STREAM_VBI_FMT_IVTV = 1, /* VBI in private packets, IVTV format */ +}; + +/* MPEG audio controls specific to multiplexed streams */ +#define V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ (V4L2_CID_CODEC_BASE+100) +enum v4l2_mpeg_audio_sampling_freq { + V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100 = 0, + V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000 = 1, + V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000 = 2, +}; +#define V4L2_CID_MPEG_AUDIO_ENCODING (V4L2_CID_CODEC_BASE+101) +enum v4l2_mpeg_audio_encoding { + V4L2_MPEG_AUDIO_ENCODING_LAYER_1 = 0, + V4L2_MPEG_AUDIO_ENCODING_LAYER_2 = 1, + V4L2_MPEG_AUDIO_ENCODING_LAYER_3 = 2, + V4L2_MPEG_AUDIO_ENCODING_AAC = 3, + V4L2_MPEG_AUDIO_ENCODING_AC3 = 4, +}; +#define V4L2_CID_MPEG_AUDIO_L1_BITRATE (V4L2_CID_CODEC_BASE+102) +enum v4l2_mpeg_audio_l1_bitrate { + V4L2_MPEG_AUDIO_L1_BITRATE_32K = 0, + V4L2_MPEG_AUDIO_L1_BITRATE_64K = 1, + V4L2_MPEG_AUDIO_L1_BITRATE_96K = 2, + V4L2_MPEG_AUDIO_L1_BITRATE_128K = 3, + V4L2_MPEG_AUDIO_L1_BITRATE_160K = 4, + V4L2_MPEG_AUDIO_L1_BITRATE_192K = 5, + V4L2_MPEG_AUDIO_L1_BITRATE_224K = 6, + V4L2_MPEG_AUDIO_L1_BITRATE_256K = 7, + V4L2_MPEG_AUDIO_L1_BITRATE_288K = 8, + V4L2_MPEG_AUDIO_L1_BITRATE_320K = 9, + V4L2_MPEG_AUDIO_L1_BITRATE_352K = 10, + V4L2_MPEG_AUDIO_L1_BITRATE_384K = 11, + V4L2_MPEG_AUDIO_L1_BITRATE_416K = 12, + V4L2_MPEG_AUDIO_L1_BITRATE_448K = 13, +}; +#define V4L2_CID_MPEG_AUDIO_L2_BITRATE (V4L2_CID_CODEC_BASE+103) +enum v4l2_mpeg_audio_l2_bitrate { + V4L2_MPEG_AUDIO_L2_BITRATE_32K = 0, + V4L2_MPEG_AUDIO_L2_BITRATE_48K = 1, + V4L2_MPEG_AUDIO_L2_BITRATE_56K = 2, + V4L2_MPEG_AUDIO_L2_BITRATE_64K = 3, + 
V4L2_MPEG_AUDIO_L2_BITRATE_80K = 4, + V4L2_MPEG_AUDIO_L2_BITRATE_96K = 5, + V4L2_MPEG_AUDIO_L2_BITRATE_112K = 6, + V4L2_MPEG_AUDIO_L2_BITRATE_128K = 7, + V4L2_MPEG_AUDIO_L2_BITRATE_160K = 8, + V4L2_MPEG_AUDIO_L2_BITRATE_192K = 9, + V4L2_MPEG_AUDIO_L2_BITRATE_224K = 10, + V4L2_MPEG_AUDIO_L2_BITRATE_256K = 11, + V4L2_MPEG_AUDIO_L2_BITRATE_320K = 12, + V4L2_MPEG_AUDIO_L2_BITRATE_384K = 13, +}; +#define V4L2_CID_MPEG_AUDIO_L3_BITRATE (V4L2_CID_CODEC_BASE+104) +enum v4l2_mpeg_audio_l3_bitrate { + V4L2_MPEG_AUDIO_L3_BITRATE_32K = 0, + V4L2_MPEG_AUDIO_L3_BITRATE_40K = 1, + V4L2_MPEG_AUDIO_L3_BITRATE_48K = 2, + V4L2_MPEG_AUDIO_L3_BITRATE_56K = 3, + V4L2_MPEG_AUDIO_L3_BITRATE_64K = 4, + V4L2_MPEG_AUDIO_L3_BITRATE_80K = 5, + V4L2_MPEG_AUDIO_L3_BITRATE_96K = 6, + V4L2_MPEG_AUDIO_L3_BITRATE_112K = 7, + V4L2_MPEG_AUDIO_L3_BITRATE_128K = 8, + V4L2_MPEG_AUDIO_L3_BITRATE_160K = 9, + V4L2_MPEG_AUDIO_L3_BITRATE_192K = 10, + V4L2_MPEG_AUDIO_L3_BITRATE_224K = 11, + V4L2_MPEG_AUDIO_L3_BITRATE_256K = 12, + V4L2_MPEG_AUDIO_L3_BITRATE_320K = 13, +}; +#define V4L2_CID_MPEG_AUDIO_MODE (V4L2_CID_CODEC_BASE+105) +enum v4l2_mpeg_audio_mode { + V4L2_MPEG_AUDIO_MODE_STEREO = 0, + V4L2_MPEG_AUDIO_MODE_JOINT_STEREO = 1, + V4L2_MPEG_AUDIO_MODE_DUAL = 2, + V4L2_MPEG_AUDIO_MODE_MONO = 3, +}; +#define V4L2_CID_MPEG_AUDIO_MODE_EXTENSION (V4L2_CID_CODEC_BASE+106) +enum v4l2_mpeg_audio_mode_extension { + V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4 = 0, + V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_8 = 1, + V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_12 = 2, + V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_16 = 3, +}; +#define V4L2_CID_MPEG_AUDIO_EMPHASIS (V4L2_CID_CODEC_BASE+107) +enum v4l2_mpeg_audio_emphasis { + V4L2_MPEG_AUDIO_EMPHASIS_NONE = 0, + V4L2_MPEG_AUDIO_EMPHASIS_50_DIV_15_uS = 1, + V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17 = 2, +}; +#define V4L2_CID_MPEG_AUDIO_CRC (V4L2_CID_CODEC_BASE+108) +enum v4l2_mpeg_audio_crc { + V4L2_MPEG_AUDIO_CRC_NONE = 0, + V4L2_MPEG_AUDIO_CRC_CRC16 = 1, +}; +#define V4L2_CID_MPEG_AUDIO_MUTE (V4L2_CID_CODEC_BASE+109) +#define V4L2_CID_MPEG_AUDIO_AAC_BITRATE (V4L2_CID_CODEC_BASE+110) +#define V4L2_CID_MPEG_AUDIO_AC3_BITRATE (V4L2_CID_CODEC_BASE+111) +enum v4l2_mpeg_audio_ac3_bitrate { + V4L2_MPEG_AUDIO_AC3_BITRATE_32K = 0, + V4L2_MPEG_AUDIO_AC3_BITRATE_40K = 1, + V4L2_MPEG_AUDIO_AC3_BITRATE_48K = 2, + V4L2_MPEG_AUDIO_AC3_BITRATE_56K = 3, + V4L2_MPEG_AUDIO_AC3_BITRATE_64K = 4, + V4L2_MPEG_AUDIO_AC3_BITRATE_80K = 5, + V4L2_MPEG_AUDIO_AC3_BITRATE_96K = 6, + V4L2_MPEG_AUDIO_AC3_BITRATE_112K = 7, + V4L2_MPEG_AUDIO_AC3_BITRATE_128K = 8, + V4L2_MPEG_AUDIO_AC3_BITRATE_160K = 9, + V4L2_MPEG_AUDIO_AC3_BITRATE_192K = 10, + V4L2_MPEG_AUDIO_AC3_BITRATE_224K = 11, + V4L2_MPEG_AUDIO_AC3_BITRATE_256K = 12, + V4L2_MPEG_AUDIO_AC3_BITRATE_320K = 13, + V4L2_MPEG_AUDIO_AC3_BITRATE_384K = 14, + V4L2_MPEG_AUDIO_AC3_BITRATE_448K = 15, + V4L2_MPEG_AUDIO_AC3_BITRATE_512K = 16, + V4L2_MPEG_AUDIO_AC3_BITRATE_576K = 17, + V4L2_MPEG_AUDIO_AC3_BITRATE_640K = 18, +}; +#define V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK (V4L2_CID_CODEC_BASE+112) +enum v4l2_mpeg_audio_dec_playback { + V4L2_MPEG_AUDIO_DEC_PLAYBACK_AUTO = 0, + V4L2_MPEG_AUDIO_DEC_PLAYBACK_STEREO = 1, + V4L2_MPEG_AUDIO_DEC_PLAYBACK_LEFT = 2, + V4L2_MPEG_AUDIO_DEC_PLAYBACK_RIGHT = 3, + V4L2_MPEG_AUDIO_DEC_PLAYBACK_MONO = 4, + V4L2_MPEG_AUDIO_DEC_PLAYBACK_SWAPPED_STEREO = 5, +}; +#define V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK (V4L2_CID_CODEC_BASE+113) + +/* MPEG video controls specific to multiplexed streams */ +#define V4L2_CID_MPEG_VIDEO_ENCODING (V4L2_CID_CODEC_BASE+200) +enum 
v4l2_mpeg_video_encoding { + V4L2_MPEG_VIDEO_ENCODING_MPEG_1 = 0, + V4L2_MPEG_VIDEO_ENCODING_MPEG_2 = 1, + V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC = 2, +}; +#define V4L2_CID_MPEG_VIDEO_ASPECT (V4L2_CID_CODEC_BASE+201) +enum v4l2_mpeg_video_aspect { + V4L2_MPEG_VIDEO_ASPECT_1x1 = 0, + V4L2_MPEG_VIDEO_ASPECT_4x3 = 1, + V4L2_MPEG_VIDEO_ASPECT_16x9 = 2, + V4L2_MPEG_VIDEO_ASPECT_221x100 = 3, +}; +#define V4L2_CID_MPEG_VIDEO_B_FRAMES (V4L2_CID_CODEC_BASE+202) +#define V4L2_CID_MPEG_VIDEO_GOP_SIZE (V4L2_CID_CODEC_BASE+203) +#define V4L2_CID_MPEG_VIDEO_GOP_CLOSURE (V4L2_CID_CODEC_BASE+204) +#define V4L2_CID_MPEG_VIDEO_PULLDOWN (V4L2_CID_CODEC_BASE+205) +#define V4L2_CID_MPEG_VIDEO_BITRATE_MODE (V4L2_CID_CODEC_BASE+206) +enum v4l2_mpeg_video_bitrate_mode { + V4L2_MPEG_VIDEO_BITRATE_MODE_VBR = 0, + V4L2_MPEG_VIDEO_BITRATE_MODE_CBR = 1, + V4L2_MPEG_VIDEO_BITRATE_MODE_CQ = 2, +}; +#define V4L2_CID_MPEG_VIDEO_BITRATE (V4L2_CID_CODEC_BASE+207) +#define V4L2_CID_MPEG_VIDEO_BITRATE_PEAK (V4L2_CID_CODEC_BASE+208) +#define V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION (V4L2_CID_CODEC_BASE+209) +#define V4L2_CID_MPEG_VIDEO_MUTE (V4L2_CID_CODEC_BASE+210) +#define V4L2_CID_MPEG_VIDEO_MUTE_YUV (V4L2_CID_CODEC_BASE+211) +#define V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE (V4L2_CID_CODEC_BASE+212) +#define V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER (V4L2_CID_CODEC_BASE+213) +#define V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB (V4L2_CID_CODEC_BASE+214) +#define V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE (V4L2_CID_CODEC_BASE+215) +#define V4L2_CID_MPEG_VIDEO_HEADER_MODE (V4L2_CID_CODEC_BASE+216) +enum v4l2_mpeg_video_header_mode { + V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE = 0, + V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME = 1, + +}; +#define V4L2_CID_MPEG_VIDEO_MAX_REF_PIC (V4L2_CID_CODEC_BASE+217) +#define V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE (V4L2_CID_CODEC_BASE+218) +#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES (V4L2_CID_CODEC_BASE+219) +#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB (V4L2_CID_CODEC_BASE+220) +#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE (V4L2_CID_CODEC_BASE+221) +enum v4l2_mpeg_video_multi_slice_mode { + V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE = 0, + V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB = 1, + V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES = 2, + /* Kept for backwards compatibility reasons. Stupid typo... 
*/ + V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB = 1, + V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES = 2, +}; +#define V4L2_CID_MPEG_VIDEO_VBV_SIZE (V4L2_CID_CODEC_BASE+222) +#define V4L2_CID_MPEG_VIDEO_DEC_PTS (V4L2_CID_CODEC_BASE+223) +#define V4L2_CID_MPEG_VIDEO_DEC_FRAME (V4L2_CID_CODEC_BASE+224) +#define V4L2_CID_MPEG_VIDEO_VBV_DELAY (V4L2_CID_CODEC_BASE+225) +#define V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER (V4L2_CID_CODEC_BASE+226) +#define V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE (V4L2_CID_CODEC_BASE+227) +#define V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE (V4L2_CID_CODEC_BASE+228) +#define V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME (V4L2_CID_CODEC_BASE+229) +#define V4L2_CID_MPEG_VIDEO_BASELAYER_PRIORITY_ID (V4L2_CID_CODEC_BASE+230) +#define V4L2_CID_MPEG_VIDEO_AU_DELIMITER (V4L2_CID_CODEC_BASE+231) +#define V4L2_CID_MPEG_VIDEO_LTR_COUNT (V4L2_CID_CODEC_BASE+232) +#define V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX (V4L2_CID_CODEC_BASE+233) +#define V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES (V4L2_CID_CODEC_BASE+234) +#define V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR (V4L2_CID_CODEC_BASE+235) +#define V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD (V4L2_CID_CODEC_BASE+236) +#define V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE (V4L2_CID_CODEC_BASE+237) +enum v4l2_mpeg_video_intra_refresh_period_type { + V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE_RANDOM = 0, + V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE_CYCLIC = 1, +}; + +/* CIDs for the MPEG-2 Part 2 (H.262) codec */ +#define V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL (V4L2_CID_CODEC_BASE+270) +enum v4l2_mpeg_video_mpeg2_level { + V4L2_MPEG_VIDEO_MPEG2_LEVEL_LOW = 0, + V4L2_MPEG_VIDEO_MPEG2_LEVEL_MAIN = 1, + V4L2_MPEG_VIDEO_MPEG2_LEVEL_HIGH_1440 = 2, + V4L2_MPEG_VIDEO_MPEG2_LEVEL_HIGH = 3, +}; +#define V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE (V4L2_CID_CODEC_BASE+271) +enum v4l2_mpeg_video_mpeg2_profile { + V4L2_MPEG_VIDEO_MPEG2_PROFILE_SIMPLE = 0, + V4L2_MPEG_VIDEO_MPEG2_PROFILE_MAIN = 1, + V4L2_MPEG_VIDEO_MPEG2_PROFILE_SNR_SCALABLE = 2, + V4L2_MPEG_VIDEO_MPEG2_PROFILE_SPATIALLY_SCALABLE = 3, + V4L2_MPEG_VIDEO_MPEG2_PROFILE_HIGH = 4, + V4L2_MPEG_VIDEO_MPEG2_PROFILE_MULTIVIEW = 5, +}; + +/* CIDs for the FWHT codec as used by the vicodec driver. 
*/ +#define V4L2_CID_FWHT_I_FRAME_QP (V4L2_CID_CODEC_BASE + 290) +#define V4L2_CID_FWHT_P_FRAME_QP (V4L2_CID_CODEC_BASE + 291) + +#define V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP (V4L2_CID_CODEC_BASE+300) +#define V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP (V4L2_CID_CODEC_BASE+301) +#define V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP (V4L2_CID_CODEC_BASE+302) +#define V4L2_CID_MPEG_VIDEO_H263_MIN_QP (V4L2_CID_CODEC_BASE+303) +#define V4L2_CID_MPEG_VIDEO_H263_MAX_QP (V4L2_CID_CODEC_BASE+304) +#define V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP (V4L2_CID_CODEC_BASE+350) +#define V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP (V4L2_CID_CODEC_BASE+351) +#define V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP (V4L2_CID_CODEC_BASE+352) +#define V4L2_CID_MPEG_VIDEO_H264_MIN_QP (V4L2_CID_CODEC_BASE+353) +#define V4L2_CID_MPEG_VIDEO_H264_MAX_QP (V4L2_CID_CODEC_BASE+354) +#define V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM (V4L2_CID_CODEC_BASE+355) +#define V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE (V4L2_CID_CODEC_BASE+356) +#define V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE (V4L2_CID_CODEC_BASE+357) +enum v4l2_mpeg_video_h264_entropy_mode { + V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC = 0, + V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC = 1, +}; +#define V4L2_CID_MPEG_VIDEO_H264_I_PERIOD (V4L2_CID_CODEC_BASE+358) +#define V4L2_CID_MPEG_VIDEO_H264_LEVEL (V4L2_CID_CODEC_BASE+359) +enum v4l2_mpeg_video_h264_level { + V4L2_MPEG_VIDEO_H264_LEVEL_1_0 = 0, + V4L2_MPEG_VIDEO_H264_LEVEL_1B = 1, + V4L2_MPEG_VIDEO_H264_LEVEL_1_1 = 2, + V4L2_MPEG_VIDEO_H264_LEVEL_1_2 = 3, + V4L2_MPEG_VIDEO_H264_LEVEL_1_3 = 4, + V4L2_MPEG_VIDEO_H264_LEVEL_2_0 = 5, + V4L2_MPEG_VIDEO_H264_LEVEL_2_1 = 6, + V4L2_MPEG_VIDEO_H264_LEVEL_2_2 = 7, + V4L2_MPEG_VIDEO_H264_LEVEL_3_0 = 8, + V4L2_MPEG_VIDEO_H264_LEVEL_3_1 = 9, + V4L2_MPEG_VIDEO_H264_LEVEL_3_2 = 10, + V4L2_MPEG_VIDEO_H264_LEVEL_4_0 = 11, + V4L2_MPEG_VIDEO_H264_LEVEL_4_1 = 12, + V4L2_MPEG_VIDEO_H264_LEVEL_4_2 = 13, + V4L2_MPEG_VIDEO_H264_LEVEL_5_0 = 14, + V4L2_MPEG_VIDEO_H264_LEVEL_5_1 = 15, + V4L2_MPEG_VIDEO_H264_LEVEL_5_2 = 16, + V4L2_MPEG_VIDEO_H264_LEVEL_6_0 = 17, + V4L2_MPEG_VIDEO_H264_LEVEL_6_1 = 18, + V4L2_MPEG_VIDEO_H264_LEVEL_6_2 = 19, +}; +#define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA (V4L2_CID_CODEC_BASE+360) +#define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA (V4L2_CID_CODEC_BASE+361) +#define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE (V4L2_CID_CODEC_BASE+362) +enum v4l2_mpeg_video_h264_loop_filter_mode { + V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED = 0, + V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED = 1, + V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY = 2, +}; +#define V4L2_CID_MPEG_VIDEO_H264_PROFILE (V4L2_CID_CODEC_BASE+363) +enum v4l2_mpeg_video_h264_profile { + V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE = 0, + V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE = 1, + V4L2_MPEG_VIDEO_H264_PROFILE_MAIN = 2, + V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED = 3, + V4L2_MPEG_VIDEO_H264_PROFILE_HIGH = 4, + V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10 = 5, + V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422 = 6, + V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE = 7, + V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10_INTRA = 8, + V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422_INTRA = 9, + V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_INTRA = 10, + V4L2_MPEG_VIDEO_H264_PROFILE_CAVLC_444_INTRA = 11, + V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_BASELINE = 12, + V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH = 13, + V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH_INTRA = 14, + V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH = 15, + V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH = 16, + 
V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH = 17, +}; +#define V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT (V4L2_CID_CODEC_BASE+364) +#define V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH (V4L2_CID_CODEC_BASE+365) +#define V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE (V4L2_CID_CODEC_BASE+366) +#define V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC (V4L2_CID_CODEC_BASE+367) +enum v4l2_mpeg_video_h264_vui_sar_idc { + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED = 0, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1 = 1, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_12x11 = 2, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_10x11 = 3, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_16x11 = 4, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_40x33 = 5, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_24x11 = 6, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_20x11 = 7, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_32x11 = 8, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_80x33 = 9, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_18x11 = 10, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_15x11 = 11, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_64x33 = 12, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_160x99 = 13, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_4x3 = 14, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_3x2 = 15, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_2x1 = 16, + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED = 17, +}; +#define V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING (V4L2_CID_CODEC_BASE+368) +#define V4L2_CID_MPEG_VIDEO_H264_SEI_FP_CURRENT_FRAME_0 (V4L2_CID_CODEC_BASE+369) +#define V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE (V4L2_CID_CODEC_BASE+370) +enum v4l2_mpeg_video_h264_sei_fp_arrangement_type { + V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_CHECKERBOARD = 0, + V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_COLUMN = 1, + V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_ROW = 2, + V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_SIDE_BY_SIDE = 3, + V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TOP_BOTTOM = 4, + V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TEMPORAL = 5, +}; +#define V4L2_CID_MPEG_VIDEO_H264_FMO (V4L2_CID_CODEC_BASE+371) +#define V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE (V4L2_CID_CODEC_BASE+372) +enum v4l2_mpeg_video_h264_fmo_map_type { + V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_INTERLEAVED_SLICES = 0, + V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_SCATTERED_SLICES = 1, + V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_FOREGROUND_WITH_LEFT_OVER = 2, + V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_BOX_OUT = 3, + V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_RASTER_SCAN = 4, + V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_WIPE_SCAN = 5, + V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_EXPLICIT = 6, +}; +#define V4L2_CID_MPEG_VIDEO_H264_FMO_SLICE_GROUP (V4L2_CID_CODEC_BASE+373) +#define V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_DIRECTION (V4L2_CID_CODEC_BASE+374) +enum v4l2_mpeg_video_h264_fmo_change_dir { + V4L2_MPEG_VIDEO_H264_FMO_CHANGE_DIR_RIGHT = 0, + V4L2_MPEG_VIDEO_H264_FMO_CHANGE_DIR_LEFT = 1, +}; +#define V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_RATE (V4L2_CID_CODEC_BASE+375) +#define V4L2_CID_MPEG_VIDEO_H264_FMO_RUN_LENGTH (V4L2_CID_CODEC_BASE+376) +#define V4L2_CID_MPEG_VIDEO_H264_ASO (V4L2_CID_CODEC_BASE+377) +#define V4L2_CID_MPEG_VIDEO_H264_ASO_SLICE_ORDER (V4L2_CID_CODEC_BASE+378) +#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING (V4L2_CID_CODEC_BASE+379) +#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE (V4L2_CID_CODEC_BASE+380) +enum v4l2_mpeg_video_h264_hierarchical_coding_type { + V4L2_MPEG_VIDEO_H264_HIERARCHICAL_CODING_B = 0, + V4L2_MPEG_VIDEO_H264_HIERARCHICAL_CODING_P = 1, +}; +#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER (V4L2_CID_CODEC_BASE+381) +#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER_QP 
(V4L2_CID_CODEC_BASE+382) +#define V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION (V4L2_CID_CODEC_BASE+383) +#define V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET (V4L2_CID_CODEC_BASE+384) +#define V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP (V4L2_CID_CODEC_BASE+385) +#define V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP (V4L2_CID_CODEC_BASE+386) +#define V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP (V4L2_CID_CODEC_BASE+387) +#define V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP (V4L2_CID_CODEC_BASE+388) +#define V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MIN_QP (V4L2_CID_CODEC_BASE+389) +#define V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MAX_QP (V4L2_CID_CODEC_BASE+390) +#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L0_BR (V4L2_CID_CODEC_BASE+391) +#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L1_BR (V4L2_CID_CODEC_BASE+392) +#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L2_BR (V4L2_CID_CODEC_BASE+393) +#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L3_BR (V4L2_CID_CODEC_BASE+394) +#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L4_BR (V4L2_CID_CODEC_BASE+395) +#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L5_BR (V4L2_CID_CODEC_BASE+396) +#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L6_BR (V4L2_CID_CODEC_BASE+397) +#define V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP (V4L2_CID_CODEC_BASE+400) +#define V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP (V4L2_CID_CODEC_BASE+401) +#define V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP (V4L2_CID_CODEC_BASE+402) +#define V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP (V4L2_CID_CODEC_BASE+403) +#define V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP (V4L2_CID_CODEC_BASE+404) +#define V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL (V4L2_CID_CODEC_BASE+405) +enum v4l2_mpeg_video_mpeg4_level { + V4L2_MPEG_VIDEO_MPEG4_LEVEL_0 = 0, + V4L2_MPEG_VIDEO_MPEG4_LEVEL_0B = 1, + V4L2_MPEG_VIDEO_MPEG4_LEVEL_1 = 2, + V4L2_MPEG_VIDEO_MPEG4_LEVEL_2 = 3, + V4L2_MPEG_VIDEO_MPEG4_LEVEL_3 = 4, + V4L2_MPEG_VIDEO_MPEG4_LEVEL_3B = 5, + V4L2_MPEG_VIDEO_MPEG4_LEVEL_4 = 6, + V4L2_MPEG_VIDEO_MPEG4_LEVEL_5 = 7, +}; +#define V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE (V4L2_CID_CODEC_BASE+406) +enum v4l2_mpeg_video_mpeg4_profile { + V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE = 0, + V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE = 1, + V4L2_MPEG_VIDEO_MPEG4_PROFILE_CORE = 2, + V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE_SCALABLE = 3, + V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY = 4, +}; +#define V4L2_CID_MPEG_VIDEO_MPEG4_QPEL (V4L2_CID_CODEC_BASE+407) + +/* Control IDs for VP8 streams + * Although VP8 is not part of MPEG we add these controls to the MPEG class + * as that class is already handling other video compression standards + */ +#define V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS (V4L2_CID_CODEC_BASE+500) +enum v4l2_vp8_num_partitions { + V4L2_CID_MPEG_VIDEO_VPX_1_PARTITION = 0, + V4L2_CID_MPEG_VIDEO_VPX_2_PARTITIONS = 1, + V4L2_CID_MPEG_VIDEO_VPX_4_PARTITIONS = 2, + V4L2_CID_MPEG_VIDEO_VPX_8_PARTITIONS = 3, +}; +#define V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4 (V4L2_CID_CODEC_BASE+501) +#define V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES (V4L2_CID_CODEC_BASE+502) +enum v4l2_vp8_num_ref_frames { + V4L2_CID_MPEG_VIDEO_VPX_1_REF_FRAME = 0, + V4L2_CID_MPEG_VIDEO_VPX_2_REF_FRAME = 1, + V4L2_CID_MPEG_VIDEO_VPX_3_REF_FRAME = 2, +}; +#define V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL (V4L2_CID_CODEC_BASE+503) +#define V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS (V4L2_CID_CODEC_BASE+504) +#define V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD (V4L2_CID_CODEC_BASE+505) +#define V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL (V4L2_CID_CODEC_BASE+506) +enum v4l2_vp8_golden_frame_sel { + 
V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_PREV = 0, + V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_REF_PERIOD = 1, +}; +#define V4L2_CID_MPEG_VIDEO_VPX_MIN_QP (V4L2_CID_CODEC_BASE+507) +#define V4L2_CID_MPEG_VIDEO_VPX_MAX_QP (V4L2_CID_CODEC_BASE+508) +#define V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP (V4L2_CID_CODEC_BASE+509) +#define V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP (V4L2_CID_CODEC_BASE+510) + +#define V4L2_CID_MPEG_VIDEO_VP8_PROFILE (V4L2_CID_CODEC_BASE+511) +enum v4l2_mpeg_video_vp8_profile { + V4L2_MPEG_VIDEO_VP8_PROFILE_0 = 0, + V4L2_MPEG_VIDEO_VP8_PROFILE_1 = 1, + V4L2_MPEG_VIDEO_VP8_PROFILE_2 = 2, + V4L2_MPEG_VIDEO_VP8_PROFILE_3 = 3, +}; +/* Deprecated alias for compatibility reasons. */ +#define V4L2_CID_MPEG_VIDEO_VPX_PROFILE V4L2_CID_MPEG_VIDEO_VP8_PROFILE +#define V4L2_CID_MPEG_VIDEO_VP9_PROFILE (V4L2_CID_CODEC_BASE+512) +enum v4l2_mpeg_video_vp9_profile { + V4L2_MPEG_VIDEO_VP9_PROFILE_0 = 0, + V4L2_MPEG_VIDEO_VP9_PROFILE_1 = 1, + V4L2_MPEG_VIDEO_VP9_PROFILE_2 = 2, + V4L2_MPEG_VIDEO_VP9_PROFILE_3 = 3, +}; +#define V4L2_CID_MPEG_VIDEO_VP9_LEVEL (V4L2_CID_CODEC_BASE+513) +enum v4l2_mpeg_video_vp9_level { + V4L2_MPEG_VIDEO_VP9_LEVEL_1_0 = 0, + V4L2_MPEG_VIDEO_VP9_LEVEL_1_1 = 1, + V4L2_MPEG_VIDEO_VP9_LEVEL_2_0 = 2, + V4L2_MPEG_VIDEO_VP9_LEVEL_2_1 = 3, + V4L2_MPEG_VIDEO_VP9_LEVEL_3_0 = 4, + V4L2_MPEG_VIDEO_VP9_LEVEL_3_1 = 5, + V4L2_MPEG_VIDEO_VP9_LEVEL_4_0 = 6, + V4L2_MPEG_VIDEO_VP9_LEVEL_4_1 = 7, + V4L2_MPEG_VIDEO_VP9_LEVEL_5_0 = 8, + V4L2_MPEG_VIDEO_VP9_LEVEL_5_1 = 9, + V4L2_MPEG_VIDEO_VP9_LEVEL_5_2 = 10, + V4L2_MPEG_VIDEO_VP9_LEVEL_6_0 = 11, + V4L2_MPEG_VIDEO_VP9_LEVEL_6_1 = 12, + V4L2_MPEG_VIDEO_VP9_LEVEL_6_2 = 13, +}; + +/* CIDs for HEVC encoding. */ + +#define V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP (V4L2_CID_CODEC_BASE + 600) +#define V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP (V4L2_CID_CODEC_BASE + 601) +#define V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP (V4L2_CID_CODEC_BASE + 602) +#define V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP (V4L2_CID_CODEC_BASE + 603) +#define V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP (V4L2_CID_CODEC_BASE + 604) +#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_QP (V4L2_CID_CODEC_BASE + 605) +#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE (V4L2_CID_CODEC_BASE + 606) +enum v4l2_mpeg_video_hevc_hier_coding_type { + V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_B = 0, + V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_P = 1, +}; +#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER (V4L2_CID_CODEC_BASE + 607) +#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_QP (V4L2_CID_CODEC_BASE + 608) +#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_QP (V4L2_CID_CODEC_BASE + 609) +#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_QP (V4L2_CID_CODEC_BASE + 610) +#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_QP (V4L2_CID_CODEC_BASE + 611) +#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_QP (V4L2_CID_CODEC_BASE + 612) +#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_QP (V4L2_CID_CODEC_BASE + 613) +#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_QP (V4L2_CID_CODEC_BASE + 614) +#define V4L2_CID_MPEG_VIDEO_HEVC_PROFILE (V4L2_CID_CODEC_BASE + 615) +enum v4l2_mpeg_video_hevc_profile { + V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN = 0, + V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE = 1, + V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10 = 2, +}; +#define V4L2_CID_MPEG_VIDEO_HEVC_LEVEL (V4L2_CID_CODEC_BASE + 616) +enum v4l2_mpeg_video_hevc_level { + V4L2_MPEG_VIDEO_HEVC_LEVEL_1 = 0, + V4L2_MPEG_VIDEO_HEVC_LEVEL_2 = 1, + V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1 = 2, + V4L2_MPEG_VIDEO_HEVC_LEVEL_3 = 3, + V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1 = 4, + V4L2_MPEG_VIDEO_HEVC_LEVEL_4 = 5, + 
V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1 = 6, + V4L2_MPEG_VIDEO_HEVC_LEVEL_5 = 7, + V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1 = 8, + V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2 = 9, + V4L2_MPEG_VIDEO_HEVC_LEVEL_6 = 10, + V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1 = 11, + V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2 = 12, +}; +#define V4L2_CID_MPEG_VIDEO_HEVC_FRAME_RATE_RESOLUTION (V4L2_CID_CODEC_BASE + 617) +#define V4L2_CID_MPEG_VIDEO_HEVC_TIER (V4L2_CID_CODEC_BASE + 618) +enum v4l2_mpeg_video_hevc_tier { + V4L2_MPEG_VIDEO_HEVC_TIER_MAIN = 0, + V4L2_MPEG_VIDEO_HEVC_TIER_HIGH = 1, +}; +#define V4L2_CID_MPEG_VIDEO_HEVC_MAX_PARTITION_DEPTH (V4L2_CID_CODEC_BASE + 619) +#define V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE (V4L2_CID_CODEC_BASE + 620) +enum v4l2_cid_mpeg_video_hevc_loop_filter_mode { + V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_DISABLED = 0, + V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_ENABLED = 1, + V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY = 2, +}; +#define V4L2_CID_MPEG_VIDEO_HEVC_LF_BETA_OFFSET_DIV2 (V4L2_CID_CODEC_BASE + 621) +#define V4L2_CID_MPEG_VIDEO_HEVC_LF_TC_OFFSET_DIV2 (V4L2_CID_CODEC_BASE + 622) +#define V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE (V4L2_CID_CODEC_BASE + 623) +enum v4l2_cid_mpeg_video_hevc_refresh_type { + V4L2_MPEG_VIDEO_HEVC_REFRESH_NONE = 0, + V4L2_MPEG_VIDEO_HEVC_REFRESH_CRA = 1, + V4L2_MPEG_VIDEO_HEVC_REFRESH_IDR = 2, +}; +#define V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_PERIOD (V4L2_CID_CODEC_BASE + 624) +#define V4L2_CID_MPEG_VIDEO_HEVC_LOSSLESS_CU (V4L2_CID_CODEC_BASE + 625) +#define V4L2_CID_MPEG_VIDEO_HEVC_CONST_INTRA_PRED (V4L2_CID_CODEC_BASE + 626) +#define V4L2_CID_MPEG_VIDEO_HEVC_WAVEFRONT (V4L2_CID_CODEC_BASE + 627) +#define V4L2_CID_MPEG_VIDEO_HEVC_GENERAL_PB (V4L2_CID_CODEC_BASE + 628) +#define V4L2_CID_MPEG_VIDEO_HEVC_TEMPORAL_ID (V4L2_CID_CODEC_BASE + 629) +#define V4L2_CID_MPEG_VIDEO_HEVC_STRONG_SMOOTHING (V4L2_CID_CODEC_BASE + 630) +#define V4L2_CID_MPEG_VIDEO_HEVC_MAX_NUM_MERGE_MV_MINUS1 (V4L2_CID_CODEC_BASE + 631) +#define V4L2_CID_MPEG_VIDEO_HEVC_INTRA_PU_SPLIT (V4L2_CID_CODEC_BASE + 632) +#define V4L2_CID_MPEG_VIDEO_HEVC_TMV_PREDICTION (V4L2_CID_CODEC_BASE + 633) +#define V4L2_CID_MPEG_VIDEO_HEVC_WITHOUT_STARTCODE (V4L2_CID_CODEC_BASE + 634) +#define V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD (V4L2_CID_CODEC_BASE + 635) +enum v4l2_cid_mpeg_video_hevc_size_of_length_field { + V4L2_MPEG_VIDEO_HEVC_SIZE_0 = 0, + V4L2_MPEG_VIDEO_HEVC_SIZE_1 = 1, + V4L2_MPEG_VIDEO_HEVC_SIZE_2 = 2, + V4L2_MPEG_VIDEO_HEVC_SIZE_4 = 3, +}; +#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR (V4L2_CID_CODEC_BASE + 636) +#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR (V4L2_CID_CODEC_BASE + 637) +#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR (V4L2_CID_CODEC_BASE + 638) +#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR (V4L2_CID_CODEC_BASE + 639) +#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR (V4L2_CID_CODEC_BASE + 640) +#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR (V4L2_CID_CODEC_BASE + 641) +#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_BR (V4L2_CID_CODEC_BASE + 642) +#define V4L2_CID_MPEG_VIDEO_REF_NUMBER_FOR_PFRAMES (V4L2_CID_CODEC_BASE + 643) +#define V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR (V4L2_CID_CODEC_BASE + 644) +#define V4L2_CID_MPEG_VIDEO_CONSTANT_QUALITY (V4L2_CID_CODEC_BASE + 645) +#define V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE (V4L2_CID_CODEC_BASE + 646) +enum v4l2_mpeg_video_frame_skip_mode { + V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_DISABLED = 0, + V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_LEVEL_LIMIT = 1, + V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT = 2, +}; + +#define 
V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MIN_QP (V4L2_CID_CODEC_BASE + 647)
+#define V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MAX_QP (V4L2_CID_CODEC_BASE + 648)
+#define V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MIN_QP (V4L2_CID_CODEC_BASE + 649)
+#define V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MAX_QP (V4L2_CID_CODEC_BASE + 650)
+#define V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MIN_QP (V4L2_CID_CODEC_BASE + 651)
+#define V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MAX_QP (V4L2_CID_CODEC_BASE + 652)
+
+#define V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY (V4L2_CID_CODEC_BASE + 653)
+#define V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE (V4L2_CID_CODEC_BASE + 654)
+
+#define V4L2_CID_MPEG_VIDEO_AV1_PROFILE (V4L2_CID_CODEC_BASE + 655)
+/**
+ * enum v4l2_mpeg_video_av1_profile - AV1 profiles
+ *
+ * @V4L2_MPEG_VIDEO_AV1_PROFILE_MAIN: compliant decoders must be able to decode
+ * streams with seq_profile equal to 0.
+ * @V4L2_MPEG_VIDEO_AV1_PROFILE_HIGH: compliant decoders must be able to decode
+ * streams with seq_profile less than or equal to 1.
+ * @V4L2_MPEG_VIDEO_AV1_PROFILE_PROFESSIONAL: compliant decoders must be able to
+ * decode streams with seq_profile less than or equal to 2.
+ *
+ * Conveys the highest profile a decoder can work with.
+ */
+enum v4l2_mpeg_video_av1_profile {
+ V4L2_MPEG_VIDEO_AV1_PROFILE_MAIN = 0,
+ V4L2_MPEG_VIDEO_AV1_PROFILE_HIGH = 1,
+ V4L2_MPEG_VIDEO_AV1_PROFILE_PROFESSIONAL = 2,
+};
+
+#define V4L2_CID_MPEG_VIDEO_AV1_LEVEL (V4L2_CID_CODEC_BASE + 656)
+/**
+ * enum v4l2_mpeg_video_av1_level - AV1 levels
+ *
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_2_0: Level 2.0.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_2_1: Level 2.1.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_2_2: Level 2.2.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_2_3: Level 2.3.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_3_0: Level 3.0.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_3_1: Level 3.1.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_3_2: Level 3.2.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_3_3: Level 3.3.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_4_0: Level 4.0.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_4_1: Level 4.1.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_4_2: Level 4.2.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_4_3: Level 4.3.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_5_0: Level 5.0.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_5_1: Level 5.1.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_5_2: Level 5.2.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_5_3: Level 5.3.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_6_0: Level 6.0.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_6_1: Level 6.1.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_6_2: Level 6.2.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_6_3: Level 6.3.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_7_0: Level 7.0.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_7_1: Level 7.1.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_7_2: Level 7.2.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_7_3: Level 7.3.
+ *
+ * Conveys the highest level a decoder can work with.
+ */ +enum v4l2_mpeg_video_av1_level { + V4L2_MPEG_VIDEO_AV1_LEVEL_2_0 = 0, + V4L2_MPEG_VIDEO_AV1_LEVEL_2_1 = 1, + V4L2_MPEG_VIDEO_AV1_LEVEL_2_2 = 2, + V4L2_MPEG_VIDEO_AV1_LEVEL_2_3 = 3, + + V4L2_MPEG_VIDEO_AV1_LEVEL_3_0 = 4, + V4L2_MPEG_VIDEO_AV1_LEVEL_3_1 = 5, + V4L2_MPEG_VIDEO_AV1_LEVEL_3_2 = 6, + V4L2_MPEG_VIDEO_AV1_LEVEL_3_3 = 7, + + V4L2_MPEG_VIDEO_AV1_LEVEL_4_0 = 8, + V4L2_MPEG_VIDEO_AV1_LEVEL_4_1 = 9, + V4L2_MPEG_VIDEO_AV1_LEVEL_4_2 = 10, + V4L2_MPEG_VIDEO_AV1_LEVEL_4_3 = 11, + + V4L2_MPEG_VIDEO_AV1_LEVEL_5_0 = 12, + V4L2_MPEG_VIDEO_AV1_LEVEL_5_1 = 13, + V4L2_MPEG_VIDEO_AV1_LEVEL_5_2 = 14, + V4L2_MPEG_VIDEO_AV1_LEVEL_5_3 = 15, + + V4L2_MPEG_VIDEO_AV1_LEVEL_6_0 = 16, + V4L2_MPEG_VIDEO_AV1_LEVEL_6_1 = 17, + V4L2_MPEG_VIDEO_AV1_LEVEL_6_2 = 18, + V4L2_MPEG_VIDEO_AV1_LEVEL_6_3 = 19, + + V4L2_MPEG_VIDEO_AV1_LEVEL_7_0 = 20, + V4L2_MPEG_VIDEO_AV1_LEVEL_7_1 = 21, + V4L2_MPEG_VIDEO_AV1_LEVEL_7_2 = 22, + V4L2_MPEG_VIDEO_AV1_LEVEL_7_3 = 23 +}; + +/* MPEG-class control IDs specific to the CX2341x driver as defined by V4L2 */ +#define V4L2_CID_CODEC_CX2341X_BASE (V4L2_CTRL_CLASS_CODEC | 0x1000) +#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE (V4L2_CID_CODEC_CX2341X_BASE+0) +enum v4l2_mpeg_cx2341x_video_spatial_filter_mode { + V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL = 0, + V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO = 1, +}; +#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER (V4L2_CID_CODEC_CX2341X_BASE+1) +#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE (V4L2_CID_CODEC_CX2341X_BASE+2) +enum v4l2_mpeg_cx2341x_video_luma_spatial_filter_type { + V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_OFF = 0, + V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_HOR = 1, + V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_VERT = 2, + V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_HV_SEPARABLE = 3, + V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_SYM_NON_SEPARABLE = 4, +}; +#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE (V4L2_CID_CODEC_CX2341X_BASE+3) +enum v4l2_mpeg_cx2341x_video_chroma_spatial_filter_type { + V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_OFF = 0, + V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR = 1, +}; +#define V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE (V4L2_CID_CODEC_CX2341X_BASE+4) +enum v4l2_mpeg_cx2341x_video_temporal_filter_mode { + V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL = 0, + V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO = 1, +}; +#define V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER (V4L2_CID_CODEC_CX2341X_BASE+5) +#define V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE (V4L2_CID_CODEC_CX2341X_BASE+6) +enum v4l2_mpeg_cx2341x_video_median_filter_type { + V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF = 0, + V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR = 1, + V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_VERT = 2, + V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR_VERT = 3, + V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_DIAG = 4, +}; +#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM (V4L2_CID_CODEC_CX2341X_BASE+7) +#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP (V4L2_CID_CODEC_CX2341X_BASE+8) +#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM (V4L2_CID_CODEC_CX2341X_BASE+9) +#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP (V4L2_CID_CODEC_CX2341X_BASE+10) +#define V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS (V4L2_CID_CODEC_CX2341X_BASE+11) + +/* MPEG-class control IDs specific to the Samsung MFC 5.1 driver as defined by V4L2 */ 
+#define V4L2_CID_CODEC_MFC51_BASE (V4L2_CTRL_CLASS_CODEC | 0x1100) + +#define V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY (V4L2_CID_CODEC_MFC51_BASE+0) +#define V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE (V4L2_CID_CODEC_MFC51_BASE+1) +#define V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE (V4L2_CID_CODEC_MFC51_BASE+2) +enum v4l2_mpeg_mfc51_video_frame_skip_mode { + V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_DISABLED = 0, + V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_LEVEL_LIMIT = 1, + V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT = 2, +}; +#define V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE (V4L2_CID_CODEC_MFC51_BASE+3) +enum v4l2_mpeg_mfc51_video_force_frame_type { + V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_DISABLED = 0, + V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_I_FRAME = 1, + V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_NOT_CODED = 2, +}; +#define V4L2_CID_MPEG_MFC51_VIDEO_PADDING (V4L2_CID_CODEC_MFC51_BASE+4) +#define V4L2_CID_MPEG_MFC51_VIDEO_PADDING_YUV (V4L2_CID_CODEC_MFC51_BASE+5) +#define V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT (V4L2_CID_CODEC_MFC51_BASE+6) +#define V4L2_CID_MPEG_MFC51_VIDEO_RC_REACTION_COEFF (V4L2_CID_CODEC_MFC51_BASE+7) +#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_ACTIVITY (V4L2_CID_CODEC_MFC51_BASE+50) +#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_DARK (V4L2_CID_CODEC_MFC51_BASE+51) +#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_SMOOTH (V4L2_CID_CODEC_MFC51_BASE+52) +#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_STATIC (V4L2_CID_CODEC_MFC51_BASE+53) +#define V4L2_CID_MPEG_MFC51_VIDEO_H264_NUM_REF_PIC_FOR_P (V4L2_CID_CODEC_MFC51_BASE+54) + +/* Camera class control IDs */ + +#define V4L2_CID_CAMERA_CLASS_BASE (V4L2_CTRL_CLASS_CAMERA | 0x900) +#define V4L2_CID_CAMERA_CLASS (V4L2_CTRL_CLASS_CAMERA | 1) + +#define V4L2_CID_EXPOSURE_AUTO (V4L2_CID_CAMERA_CLASS_BASE+1) +enum v4l2_exposure_auto_type { + V4L2_EXPOSURE_AUTO = 0, + V4L2_EXPOSURE_MANUAL = 1, + V4L2_EXPOSURE_SHUTTER_PRIORITY = 2, + V4L2_EXPOSURE_APERTURE_PRIORITY = 3 +}; +#define V4L2_CID_EXPOSURE_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+2) +#define V4L2_CID_EXPOSURE_AUTO_PRIORITY (V4L2_CID_CAMERA_CLASS_BASE+3) + +#define V4L2_CID_PAN_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+4) +#define V4L2_CID_TILT_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+5) +#define V4L2_CID_PAN_RESET (V4L2_CID_CAMERA_CLASS_BASE+6) +#define V4L2_CID_TILT_RESET (V4L2_CID_CAMERA_CLASS_BASE+7) + +#define V4L2_CID_PAN_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+8) +#define V4L2_CID_TILT_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+9) + +#define V4L2_CID_FOCUS_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+10) +#define V4L2_CID_FOCUS_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+11) +#define V4L2_CID_FOCUS_AUTO (V4L2_CID_CAMERA_CLASS_BASE+12) + +#define V4L2_CID_ZOOM_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+13) +#define V4L2_CID_ZOOM_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+14) +#define V4L2_CID_ZOOM_CONTINUOUS (V4L2_CID_CAMERA_CLASS_BASE+15) + +#define V4L2_CID_PRIVACY (V4L2_CID_CAMERA_CLASS_BASE+16) + +#define V4L2_CID_IRIS_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+17) +#define V4L2_CID_IRIS_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+18) + +#define V4L2_CID_AUTO_EXPOSURE_BIAS (V4L2_CID_CAMERA_CLASS_BASE+19) + +#define V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE (V4L2_CID_CAMERA_CLASS_BASE+20) +enum v4l2_auto_n_preset_white_balance { + V4L2_WHITE_BALANCE_MANUAL = 0, + V4L2_WHITE_BALANCE_AUTO = 1, + V4L2_WHITE_BALANCE_INCANDESCENT = 2, + V4L2_WHITE_BALANCE_FLUORESCENT = 3, + V4L2_WHITE_BALANCE_FLUORESCENT_H = 4, + V4L2_WHITE_BALANCE_HORIZON = 5, + V4L2_WHITE_BALANCE_DAYLIGHT = 6, + 
V4L2_WHITE_BALANCE_FLASH = 7, + V4L2_WHITE_BALANCE_CLOUDY = 8, + V4L2_WHITE_BALANCE_SHADE = 9, +}; + +#define V4L2_CID_WIDE_DYNAMIC_RANGE (V4L2_CID_CAMERA_CLASS_BASE+21) +#define V4L2_CID_IMAGE_STABILIZATION (V4L2_CID_CAMERA_CLASS_BASE+22) + +#define V4L2_CID_ISO_SENSITIVITY (V4L2_CID_CAMERA_CLASS_BASE+23) +#define V4L2_CID_ISO_SENSITIVITY_AUTO (V4L2_CID_CAMERA_CLASS_BASE+24) +enum v4l2_iso_sensitivity_auto_type { + V4L2_ISO_SENSITIVITY_MANUAL = 0, + V4L2_ISO_SENSITIVITY_AUTO = 1, +}; + +#define V4L2_CID_EXPOSURE_METERING (V4L2_CID_CAMERA_CLASS_BASE+25) +enum v4l2_exposure_metering { + V4L2_EXPOSURE_METERING_AVERAGE = 0, + V4L2_EXPOSURE_METERING_CENTER_WEIGHTED = 1, + V4L2_EXPOSURE_METERING_SPOT = 2, + V4L2_EXPOSURE_METERING_MATRIX = 3, +}; + +#define V4L2_CID_SCENE_MODE (V4L2_CID_CAMERA_CLASS_BASE+26) +enum v4l2_scene_mode { + V4L2_SCENE_MODE_NONE = 0, + V4L2_SCENE_MODE_BACKLIGHT = 1, + V4L2_SCENE_MODE_BEACH_SNOW = 2, + V4L2_SCENE_MODE_CANDLE_LIGHT = 3, + V4L2_SCENE_MODE_DAWN_DUSK = 4, + V4L2_SCENE_MODE_FALL_COLORS = 5, + V4L2_SCENE_MODE_FIREWORKS = 6, + V4L2_SCENE_MODE_LANDSCAPE = 7, + V4L2_SCENE_MODE_NIGHT = 8, + V4L2_SCENE_MODE_PARTY_INDOOR = 9, + V4L2_SCENE_MODE_PORTRAIT = 10, + V4L2_SCENE_MODE_SPORTS = 11, + V4L2_SCENE_MODE_SUNSET = 12, + V4L2_SCENE_MODE_TEXT = 13, +}; + +#define V4L2_CID_3A_LOCK (V4L2_CID_CAMERA_CLASS_BASE+27) +#define V4L2_LOCK_EXPOSURE (1 << 0) +#define V4L2_LOCK_WHITE_BALANCE (1 << 1) +#define V4L2_LOCK_FOCUS (1 << 2) + +#define V4L2_CID_AUTO_FOCUS_START (V4L2_CID_CAMERA_CLASS_BASE+28) +#define V4L2_CID_AUTO_FOCUS_STOP (V4L2_CID_CAMERA_CLASS_BASE+29) +#define V4L2_CID_AUTO_FOCUS_STATUS (V4L2_CID_CAMERA_CLASS_BASE+30) +#define V4L2_AUTO_FOCUS_STATUS_IDLE (0 << 0) +#define V4L2_AUTO_FOCUS_STATUS_BUSY (1 << 0) +#define V4L2_AUTO_FOCUS_STATUS_REACHED (1 << 1) +#define V4L2_AUTO_FOCUS_STATUS_FAILED (1 << 2) + +#define V4L2_CID_AUTO_FOCUS_RANGE (V4L2_CID_CAMERA_CLASS_BASE+31) +enum v4l2_auto_focus_range { + V4L2_AUTO_FOCUS_RANGE_AUTO = 0, + V4L2_AUTO_FOCUS_RANGE_NORMAL = 1, + V4L2_AUTO_FOCUS_RANGE_MACRO = 2, + V4L2_AUTO_FOCUS_RANGE_INFINITY = 3, +}; + +#define V4L2_CID_PAN_SPEED (V4L2_CID_CAMERA_CLASS_BASE+32) +#define V4L2_CID_TILT_SPEED (V4L2_CID_CAMERA_CLASS_BASE+33) + +#define V4L2_CID_CAMERA_ORIENTATION (V4L2_CID_CAMERA_CLASS_BASE+34) +#define V4L2_CAMERA_ORIENTATION_FRONT 0 +#define V4L2_CAMERA_ORIENTATION_BACK 1 +#define V4L2_CAMERA_ORIENTATION_EXTERNAL 2 + +#define V4L2_CID_CAMERA_SENSOR_ROTATION (V4L2_CID_CAMERA_CLASS_BASE+35) + +#define V4L2_CID_HDR_SENSOR_MODE (V4L2_CID_CAMERA_CLASS_BASE+36) + +/* FM Modulator class control IDs */ + +#define V4L2_CID_FM_TX_CLASS_BASE (V4L2_CTRL_CLASS_FM_TX | 0x900) +#define V4L2_CID_FM_TX_CLASS (V4L2_CTRL_CLASS_FM_TX | 1) + +#define V4L2_CID_RDS_TX_DEVIATION (V4L2_CID_FM_TX_CLASS_BASE + 1) +#define V4L2_CID_RDS_TX_PI (V4L2_CID_FM_TX_CLASS_BASE + 2) +#define V4L2_CID_RDS_TX_PTY (V4L2_CID_FM_TX_CLASS_BASE + 3) +#define V4L2_CID_RDS_TX_PS_NAME (V4L2_CID_FM_TX_CLASS_BASE + 5) +#define V4L2_CID_RDS_TX_RADIO_TEXT (V4L2_CID_FM_TX_CLASS_BASE + 6) +#define V4L2_CID_RDS_TX_MONO_STEREO (V4L2_CID_FM_TX_CLASS_BASE + 7) +#define V4L2_CID_RDS_TX_ARTIFICIAL_HEAD (V4L2_CID_FM_TX_CLASS_BASE + 8) +#define V4L2_CID_RDS_TX_COMPRESSED (V4L2_CID_FM_TX_CLASS_BASE + 9) +#define V4L2_CID_RDS_TX_DYNAMIC_PTY (V4L2_CID_FM_TX_CLASS_BASE + 10) +#define V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT (V4L2_CID_FM_TX_CLASS_BASE + 11) +#define V4L2_CID_RDS_TX_TRAFFIC_PROGRAM (V4L2_CID_FM_TX_CLASS_BASE + 12) +#define V4L2_CID_RDS_TX_MUSIC_SPEECH 
(V4L2_CID_FM_TX_CLASS_BASE + 13) +#define V4L2_CID_RDS_TX_ALT_FREQS_ENABLE (V4L2_CID_FM_TX_CLASS_BASE + 14) +#define V4L2_CID_RDS_TX_ALT_FREQS (V4L2_CID_FM_TX_CLASS_BASE + 15) + +#define V4L2_CID_AUDIO_LIMITER_ENABLED (V4L2_CID_FM_TX_CLASS_BASE + 64) +#define V4L2_CID_AUDIO_LIMITER_RELEASE_TIME (V4L2_CID_FM_TX_CLASS_BASE + 65) +#define V4L2_CID_AUDIO_LIMITER_DEVIATION (V4L2_CID_FM_TX_CLASS_BASE + 66) + +#define V4L2_CID_AUDIO_COMPRESSION_ENABLED (V4L2_CID_FM_TX_CLASS_BASE + 80) +#define V4L2_CID_AUDIO_COMPRESSION_GAIN (V4L2_CID_FM_TX_CLASS_BASE + 81) +#define V4L2_CID_AUDIO_COMPRESSION_THRESHOLD (V4L2_CID_FM_TX_CLASS_BASE + 82) +#define V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME (V4L2_CID_FM_TX_CLASS_BASE + 83) +#define V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME (V4L2_CID_FM_TX_CLASS_BASE + 84) + +#define V4L2_CID_PILOT_TONE_ENABLED (V4L2_CID_FM_TX_CLASS_BASE + 96) +#define V4L2_CID_PILOT_TONE_DEVIATION (V4L2_CID_FM_TX_CLASS_BASE + 97) +#define V4L2_CID_PILOT_TONE_FREQUENCY (V4L2_CID_FM_TX_CLASS_BASE + 98) + +#define V4L2_CID_TUNE_PREEMPHASIS (V4L2_CID_FM_TX_CLASS_BASE + 112) +enum v4l2_preemphasis { + V4L2_PREEMPHASIS_DISABLED = 0, + V4L2_PREEMPHASIS_50_uS = 1, + V4L2_PREEMPHASIS_75_uS = 2, +}; +#define V4L2_CID_TUNE_POWER_LEVEL (V4L2_CID_FM_TX_CLASS_BASE + 113) +#define V4L2_CID_TUNE_ANTENNA_CAPACITOR (V4L2_CID_FM_TX_CLASS_BASE + 114) + + +/* Flash and privacy (indicator) light controls */ + +#define V4L2_CID_FLASH_CLASS_BASE (V4L2_CTRL_CLASS_FLASH | 0x900) +#define V4L2_CID_FLASH_CLASS (V4L2_CTRL_CLASS_FLASH | 1) + +#define V4L2_CID_FLASH_LED_MODE (V4L2_CID_FLASH_CLASS_BASE + 1) +enum v4l2_flash_led_mode { + V4L2_FLASH_LED_MODE_NONE, + V4L2_FLASH_LED_MODE_FLASH, + V4L2_FLASH_LED_MODE_TORCH, +}; + +#define V4L2_CID_FLASH_STROBE_SOURCE (V4L2_CID_FLASH_CLASS_BASE + 2) +enum v4l2_flash_strobe_source { + V4L2_FLASH_STROBE_SOURCE_SOFTWARE, + V4L2_FLASH_STROBE_SOURCE_EXTERNAL, +}; + +#define V4L2_CID_FLASH_STROBE (V4L2_CID_FLASH_CLASS_BASE + 3) +#define V4L2_CID_FLASH_STROBE_STOP (V4L2_CID_FLASH_CLASS_BASE + 4) +#define V4L2_CID_FLASH_STROBE_STATUS (V4L2_CID_FLASH_CLASS_BASE + 5) + +#define V4L2_CID_FLASH_TIMEOUT (V4L2_CID_FLASH_CLASS_BASE + 6) +#define V4L2_CID_FLASH_INTENSITY (V4L2_CID_FLASH_CLASS_BASE + 7) +#define V4L2_CID_FLASH_TORCH_INTENSITY (V4L2_CID_FLASH_CLASS_BASE + 8) +#define V4L2_CID_FLASH_INDICATOR_INTENSITY (V4L2_CID_FLASH_CLASS_BASE + 9) + +#define V4L2_CID_FLASH_FAULT (V4L2_CID_FLASH_CLASS_BASE + 10) +#define V4L2_FLASH_FAULT_OVER_VOLTAGE (1 << 0) +#define V4L2_FLASH_FAULT_TIMEOUT (1 << 1) +#define V4L2_FLASH_FAULT_OVER_TEMPERATURE (1 << 2) +#define V4L2_FLASH_FAULT_SHORT_CIRCUIT (1 << 3) +#define V4L2_FLASH_FAULT_OVER_CURRENT (1 << 4) +#define V4L2_FLASH_FAULT_INDICATOR (1 << 5) +#define V4L2_FLASH_FAULT_UNDER_VOLTAGE (1 << 6) +#define V4L2_FLASH_FAULT_INPUT_VOLTAGE (1 << 7) +#define V4L2_FLASH_FAULT_LED_OVER_TEMPERATURE (1 << 8) + +#define V4L2_CID_FLASH_CHARGE (V4L2_CID_FLASH_CLASS_BASE + 11) +#define V4L2_CID_FLASH_READY (V4L2_CID_FLASH_CLASS_BASE + 12) + + +/* JPEG-class control IDs */ + +#define V4L2_CID_JPEG_CLASS_BASE (V4L2_CTRL_CLASS_JPEG | 0x900) +#define V4L2_CID_JPEG_CLASS (V4L2_CTRL_CLASS_JPEG | 1) + +#define V4L2_CID_JPEG_CHROMA_SUBSAMPLING (V4L2_CID_JPEG_CLASS_BASE + 1) +enum v4l2_jpeg_chroma_subsampling { + V4L2_JPEG_CHROMA_SUBSAMPLING_444 = 0, + V4L2_JPEG_CHROMA_SUBSAMPLING_422 = 1, + V4L2_JPEG_CHROMA_SUBSAMPLING_420 = 2, + V4L2_JPEG_CHROMA_SUBSAMPLING_411 = 3, + V4L2_JPEG_CHROMA_SUBSAMPLING_410 = 4, + V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY = 5, +}; +#define 
V4L2_CID_JPEG_RESTART_INTERVAL (V4L2_CID_JPEG_CLASS_BASE + 2) +#define V4L2_CID_JPEG_COMPRESSION_QUALITY (V4L2_CID_JPEG_CLASS_BASE + 3) + +#define V4L2_CID_JPEG_ACTIVE_MARKER (V4L2_CID_JPEG_CLASS_BASE + 4) +#define V4L2_JPEG_ACTIVE_MARKER_APP0 (1 << 0) +#define V4L2_JPEG_ACTIVE_MARKER_APP1 (1 << 1) +#define V4L2_JPEG_ACTIVE_MARKER_COM (1 << 16) +#define V4L2_JPEG_ACTIVE_MARKER_DQT (1 << 17) +#define V4L2_JPEG_ACTIVE_MARKER_DHT (1 << 18) + + +/* Image source controls */ +#define V4L2_CID_IMAGE_SOURCE_CLASS_BASE (V4L2_CTRL_CLASS_IMAGE_SOURCE | 0x900) +#define V4L2_CID_IMAGE_SOURCE_CLASS (V4L2_CTRL_CLASS_IMAGE_SOURCE | 1) + +#define V4L2_CID_VBLANK (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 1) +#define V4L2_CID_HBLANK (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 2) +#define V4L2_CID_ANALOGUE_GAIN (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 3) +#define V4L2_CID_TEST_PATTERN_RED (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 4) +#define V4L2_CID_TEST_PATTERN_GREENR (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 5) +#define V4L2_CID_TEST_PATTERN_BLUE (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 6) +#define V4L2_CID_TEST_PATTERN_GREENB (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 7) +#define V4L2_CID_UNIT_CELL_SIZE (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 8) +#define V4L2_CID_NOTIFY_GAINS (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 9) + + +/* Image processing controls */ + +#define V4L2_CID_IMAGE_PROC_CLASS_BASE (V4L2_CTRL_CLASS_IMAGE_PROC | 0x900) +#define V4L2_CID_IMAGE_PROC_CLASS (V4L2_CTRL_CLASS_IMAGE_PROC | 1) + +#define V4L2_CID_LINK_FREQ (V4L2_CID_IMAGE_PROC_CLASS_BASE + 1) +#define V4L2_CID_PIXEL_RATE (V4L2_CID_IMAGE_PROC_CLASS_BASE + 2) +#define V4L2_CID_TEST_PATTERN (V4L2_CID_IMAGE_PROC_CLASS_BASE + 3) +#define V4L2_CID_DEINTERLACING_MODE (V4L2_CID_IMAGE_PROC_CLASS_BASE + 4) +#define V4L2_CID_DIGITAL_GAIN (V4L2_CID_IMAGE_PROC_CLASS_BASE + 5) + +/* DV-class control IDs defined by V4L2 */ +#define V4L2_CID_DV_CLASS_BASE (V4L2_CTRL_CLASS_DV | 0x900) +#define V4L2_CID_DV_CLASS (V4L2_CTRL_CLASS_DV | 1) + +#define V4L2_CID_DV_TX_HOTPLUG (V4L2_CID_DV_CLASS_BASE + 1) +#define V4L2_CID_DV_TX_RXSENSE (V4L2_CID_DV_CLASS_BASE + 2) +#define V4L2_CID_DV_TX_EDID_PRESENT (V4L2_CID_DV_CLASS_BASE + 3) +#define V4L2_CID_DV_TX_MODE (V4L2_CID_DV_CLASS_BASE + 4) +enum v4l2_dv_tx_mode { + V4L2_DV_TX_MODE_DVI_D = 0, + V4L2_DV_TX_MODE_HDMI = 1, +}; +#define V4L2_CID_DV_TX_RGB_RANGE (V4L2_CID_DV_CLASS_BASE + 5) +enum v4l2_dv_rgb_range { + V4L2_DV_RGB_RANGE_AUTO = 0, + V4L2_DV_RGB_RANGE_LIMITED = 1, + V4L2_DV_RGB_RANGE_FULL = 2, +}; + +#define V4L2_CID_DV_TX_IT_CONTENT_TYPE (V4L2_CID_DV_CLASS_BASE + 6) +enum v4l2_dv_it_content_type { + V4L2_DV_IT_CONTENT_TYPE_GRAPHICS = 0, + V4L2_DV_IT_CONTENT_TYPE_PHOTO = 1, + V4L2_DV_IT_CONTENT_TYPE_CINEMA = 2, + V4L2_DV_IT_CONTENT_TYPE_GAME = 3, + V4L2_DV_IT_CONTENT_TYPE_NO_ITC = 4, +}; + +#define V4L2_CID_DV_RX_POWER_PRESENT (V4L2_CID_DV_CLASS_BASE + 100) +#define V4L2_CID_DV_RX_RGB_RANGE (V4L2_CID_DV_CLASS_BASE + 101) +#define V4L2_CID_DV_RX_IT_CONTENT_TYPE (V4L2_CID_DV_CLASS_BASE + 102) + +#define V4L2_CID_FM_RX_CLASS_BASE (V4L2_CTRL_CLASS_FM_RX | 0x900) +#define V4L2_CID_FM_RX_CLASS (V4L2_CTRL_CLASS_FM_RX | 1) + +#define V4L2_CID_TUNE_DEEMPHASIS (V4L2_CID_FM_RX_CLASS_BASE + 1) +enum v4l2_deemphasis { + V4L2_DEEMPHASIS_DISABLED = V4L2_PREEMPHASIS_DISABLED, + V4L2_DEEMPHASIS_50_uS = V4L2_PREEMPHASIS_50_uS, + V4L2_DEEMPHASIS_75_uS = V4L2_PREEMPHASIS_75_uS, +}; + +#define V4L2_CID_RDS_RECEPTION (V4L2_CID_FM_RX_CLASS_BASE + 2) +#define V4L2_CID_RDS_RX_PTY (V4L2_CID_FM_RX_CLASS_BASE + 3) +#define V4L2_CID_RDS_RX_PS_NAME (V4L2_CID_FM_RX_CLASS_BASE + 4) 
+#define V4L2_CID_RDS_RX_RADIO_TEXT (V4L2_CID_FM_RX_CLASS_BASE + 5) +#define V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT (V4L2_CID_FM_RX_CLASS_BASE + 6) +#define V4L2_CID_RDS_RX_TRAFFIC_PROGRAM (V4L2_CID_FM_RX_CLASS_BASE + 7) +#define V4L2_CID_RDS_RX_MUSIC_SPEECH (V4L2_CID_FM_RX_CLASS_BASE + 8) + +#define V4L2_CID_RF_TUNER_CLASS_BASE (V4L2_CTRL_CLASS_RF_TUNER | 0x900) +#define V4L2_CID_RF_TUNER_CLASS (V4L2_CTRL_CLASS_RF_TUNER | 1) + +#define V4L2_CID_RF_TUNER_BANDWIDTH_AUTO (V4L2_CID_RF_TUNER_CLASS_BASE + 11) +#define V4L2_CID_RF_TUNER_BANDWIDTH (V4L2_CID_RF_TUNER_CLASS_BASE + 12) +#define V4L2_CID_RF_TUNER_RF_GAIN (V4L2_CID_RF_TUNER_CLASS_BASE + 32) +#define V4L2_CID_RF_TUNER_LNA_GAIN_AUTO (V4L2_CID_RF_TUNER_CLASS_BASE + 41) +#define V4L2_CID_RF_TUNER_LNA_GAIN (V4L2_CID_RF_TUNER_CLASS_BASE + 42) +#define V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO (V4L2_CID_RF_TUNER_CLASS_BASE + 51) +#define V4L2_CID_RF_TUNER_MIXER_GAIN (V4L2_CID_RF_TUNER_CLASS_BASE + 52) +#define V4L2_CID_RF_TUNER_IF_GAIN_AUTO (V4L2_CID_RF_TUNER_CLASS_BASE + 61) +#define V4L2_CID_RF_TUNER_IF_GAIN (V4L2_CID_RF_TUNER_CLASS_BASE + 62) +#define V4L2_CID_RF_TUNER_PLL_LOCK (V4L2_CID_RF_TUNER_CLASS_BASE + 91) + + +/* Detection-class control IDs defined by V4L2 */ +#define V4L2_CID_DETECT_CLASS_BASE (V4L2_CTRL_CLASS_DETECT | 0x900) +#define V4L2_CID_DETECT_CLASS (V4L2_CTRL_CLASS_DETECT | 1) + +#define V4L2_CID_DETECT_MD_MODE (V4L2_CID_DETECT_CLASS_BASE + 1) +enum v4l2_detect_md_mode { + V4L2_DETECT_MD_MODE_DISABLED = 0, + V4L2_DETECT_MD_MODE_GLOBAL = 1, + V4L2_DETECT_MD_MODE_THRESHOLD_GRID = 2, + V4L2_DETECT_MD_MODE_REGION_GRID = 3, +}; +#define V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD (V4L2_CID_DETECT_CLASS_BASE + 2) +#define V4L2_CID_DETECT_MD_THRESHOLD_GRID (V4L2_CID_DETECT_CLASS_BASE + 3) +#define V4L2_CID_DETECT_MD_REGION_GRID (V4L2_CID_DETECT_CLASS_BASE + 4) + + +/* Stateless CODECs controls */ +#define V4L2_CID_CODEC_STATELESS_BASE (V4L2_CTRL_CLASS_CODEC_STATELESS | 0x900) +#define V4L2_CID_CODEC_STATELESS_CLASS (V4L2_CTRL_CLASS_CODEC_STATELESS | 1) + +#define V4L2_CID_STATELESS_H264_DECODE_MODE (V4L2_CID_CODEC_STATELESS_BASE + 0) +/** + * enum v4l2_stateless_h264_decode_mode - Decoding mode + * + * @V4L2_STATELESS_H264_DECODE_MODE_SLICE_BASED: indicates that decoding + * is performed one slice at a time. In this mode, + * V4L2_CID_STATELESS_H264_SLICE_PARAMS must contain the parsed slice + * parameters and the OUTPUT buffer must contain a single slice. + * V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF feature is used + * in order to support multislice frames. + * @V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED: indicates that + * decoding is performed per frame. The OUTPUT buffer must contain + * all slices and also both fields. This mode is typically supported + * by device drivers that are able to parse the slice(s) header(s) + * in hardware. When this mode is selected, + * V4L2_CID_STATELESS_H264_SLICE_PARAMS is not used. + */ +enum v4l2_stateless_h264_decode_mode { + V4L2_STATELESS_H264_DECODE_MODE_SLICE_BASED, + V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED, +}; + +#define V4L2_CID_STATELESS_H264_START_CODE (V4L2_CID_CODEC_STATELESS_BASE + 1) +/** + * enum v4l2_stateless_h264_start_code - Start code + * + * @V4L2_STATELESS_H264_START_CODE_NONE: slices are passed + * to the driver without any start code. + * @V4L2_STATELESS_H264_START_CODE_ANNEX_B: slices are passed + * to the driver with an Annex B start code prefix + * (legal start codes can be 3-bytes 0x000001 or 4-bytes 0x00000001). 
+ * This mode is typically supported by device drivers that parse + * the start code in hardware. + */ +enum v4l2_stateless_h264_start_code { + V4L2_STATELESS_H264_START_CODE_NONE, + V4L2_STATELESS_H264_START_CODE_ANNEX_B, +}; + +#define V4L2_H264_SPS_CONSTRAINT_SET0_FLAG 0x01 +#define V4L2_H264_SPS_CONSTRAINT_SET1_FLAG 0x02 +#define V4L2_H264_SPS_CONSTRAINT_SET2_FLAG 0x04 +#define V4L2_H264_SPS_CONSTRAINT_SET3_FLAG 0x08 +#define V4L2_H264_SPS_CONSTRAINT_SET4_FLAG 0x10 +#define V4L2_H264_SPS_CONSTRAINT_SET5_FLAG 0x20 + +#define V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE 0x01 +#define V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS 0x02 +#define V4L2_H264_SPS_FLAG_DELTA_PIC_ORDER_ALWAYS_ZERO 0x04 +#define V4L2_H264_SPS_FLAG_GAPS_IN_FRAME_NUM_VALUE_ALLOWED 0x08 +#define V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY 0x10 +#define V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD 0x20 +#define V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE 0x40 + +#define V4L2_H264_SPS_HAS_CHROMA_FORMAT(sps) \ + ((sps)->profile_idc == 100 || (sps)->profile_idc == 110 || \ + (sps)->profile_idc == 122 || (sps)->profile_idc == 244 || \ + (sps)->profile_idc == 44 || (sps)->profile_idc == 83 || \ + (sps)->profile_idc == 86 || (sps)->profile_idc == 118 || \ + (sps)->profile_idc == 128 || (sps)->profile_idc == 138 || \ + (sps)->profile_idc == 139 || (sps)->profile_idc == 134 || \ + (sps)->profile_idc == 135) + +#define V4L2_CID_STATELESS_H264_SPS (V4L2_CID_CODEC_STATELESS_BASE + 2) +/** + * struct v4l2_ctrl_h264_sps - H264 sequence parameter set + * + * All the members on this sequence parameter set structure match the + * sequence parameter set syntax as specified by the H264 specification. + * + * @profile_idc: see H264 specification. + * @constraint_set_flags: see H264 specification. + * @level_idc: see H264 specification. + * @seq_parameter_set_id: see H264 specification. + * @chroma_format_idc: see H264 specification. + * @bit_depth_luma_minus8: see H264 specification. + * @bit_depth_chroma_minus8: see H264 specification. + * @log2_max_frame_num_minus4: see H264 specification. + * @pic_order_cnt_type: see H264 specification. + * @log2_max_pic_order_cnt_lsb_minus4: see H264 specification. + * @max_num_ref_frames: see H264 specification. + * @num_ref_frames_in_pic_order_cnt_cycle: see H264 specification. + * @offset_for_ref_frame: see H264 specification. + * @offset_for_non_ref_pic: see H264 specification. + * @offset_for_top_to_bottom_field: see H264 specification. + * @pic_width_in_mbs_minus1: see H264 specification. + * @pic_height_in_map_units_minus1: see H264 specification. + * @flags: see V4L2_H264_SPS_FLAG_{}. 
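+ *
+ * Illustrative sketch (not upstream documentation): the
+ * V4L2_H264_SPS_HAS_CHROMA_FORMAT() helper above tells whether
+ * @chroma_format_idc was coded explicitly for this profile; when it was
+ * not, the H264 specification infers 4:2:0 (chroma_format_idc == 1):
+ *
+ *   fmt = V4L2_H264_SPS_HAS_CHROMA_FORMAT(&sps) ? sps.chroma_format_idc : 1;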
+ */ +struct v4l2_ctrl_h264_sps { + __u8 profile_idc; + __u8 constraint_set_flags; + __u8 level_idc; + __u8 seq_parameter_set_id; + __u8 chroma_format_idc; + __u8 bit_depth_luma_minus8; + __u8 bit_depth_chroma_minus8; + __u8 log2_max_frame_num_minus4; + __u8 pic_order_cnt_type; + __u8 log2_max_pic_order_cnt_lsb_minus4; + __u8 max_num_ref_frames; + __u8 num_ref_frames_in_pic_order_cnt_cycle; + __s32 offset_for_ref_frame[255]; + __s32 offset_for_non_ref_pic; + __s32 offset_for_top_to_bottom_field; + __u16 pic_width_in_mbs_minus1; + __u16 pic_height_in_map_units_minus1; + __u32 flags; +}; + +#define V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE 0x0001 +#define V4L2_H264_PPS_FLAG_BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT 0x0002 +#define V4L2_H264_PPS_FLAG_WEIGHTED_PRED 0x0004 +#define V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT 0x0008 +#define V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED 0x0010 +#define V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT 0x0020 +#define V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE 0x0040 +#define V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT 0x0080 + +#define V4L2_CID_STATELESS_H264_PPS (V4L2_CID_CODEC_STATELESS_BASE + 3) +/** + * struct v4l2_ctrl_h264_pps - H264 picture parameter set + * + * Except where noted, all the members on this picture parameter set + * structure match the picture parameter set syntax as specified + * by the H264 specification. + * + * In particular, V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT flag + * has a specific meaning. This flag should be set if a non-flat + * scaling matrix applies to the picture. In this case, applications + * are expected to use V4L2_CID_STATELESS_H264_SCALING_MATRIX, + * to pass the values of the non-flat matrices. + * + * @pic_parameter_set_id: see H264 specification. + * @seq_parameter_set_id: see H264 specification. + * @num_slice_groups_minus1: see H264 specification. + * @num_ref_idx_l0_default_active_minus1: see H264 specification. + * @num_ref_idx_l1_default_active_minus1: see H264 specification. + * @weighted_bipred_idc: see H264 specification. + * @pic_init_qp_minus26: see H264 specification. + * @pic_init_qs_minus26: see H264 specification. + * @chroma_qp_index_offset: see H264 specification. + * @second_chroma_qp_index_offset: see H264 specification. + * @flags: see V4L2_H264_PPS_FLAG_{}. + */ +struct v4l2_ctrl_h264_pps { + __u8 pic_parameter_set_id; + __u8 seq_parameter_set_id; + __u8 num_slice_groups_minus1; + __u8 num_ref_idx_l0_default_active_minus1; + __u8 num_ref_idx_l1_default_active_minus1; + __u8 weighted_bipred_idc; + __s8 pic_init_qp_minus26; + __s8 pic_init_qs_minus26; + __s8 chroma_qp_index_offset; + __s8 second_chroma_qp_index_offset; + __u16 flags; +}; + +#define V4L2_CID_STATELESS_H264_SCALING_MATRIX (V4L2_CID_CODEC_STATELESS_BASE + 4) +/** + * struct v4l2_ctrl_h264_scaling_matrix - H264 scaling matrices + * + * @scaling_list_4x4: scaling matrix after applying the inverse + * scanning process. Expected list order is Intra Y, Intra Cb, + * Intra Cr, Inter Y, Inter Cb, Inter Cr. The values on each + * scaling list are expected in raster scan order. + * @scaling_list_8x8: scaling matrix after applying the inverse + * scanning process. Expected list order is Intra Y, Inter Y, + * Intra Cb, Inter Cb, Intra Cr, Inter Cr. The values on each + * scaling list are expected in raster scan order. + * + * Note that the list order is different for the 4x4 and 8x8 + * matrices as per the H264 specification, see table 7-2 "Assignment + * of mnemonic names to scaling list indices and specification of + * fall-back rule". 
+ */ +struct v4l2_ctrl_h264_scaling_matrix { + __u8 scaling_list_4x4[6][16]; + __u8 scaling_list_8x8[6][64]; +}; + +struct v4l2_h264_weight_factors { + __s16 luma_weight[32]; + __s16 luma_offset[32]; + __s16 chroma_weight[32][2]; + __s16 chroma_offset[32][2]; +}; + +#define V4L2_H264_CTRL_PRED_WEIGHTS_REQUIRED(pps, slice) \ + ((((pps)->flags & V4L2_H264_PPS_FLAG_WEIGHTED_PRED) && \ + ((slice)->slice_type == V4L2_H264_SLICE_TYPE_P || \ + (slice)->slice_type == V4L2_H264_SLICE_TYPE_SP)) || \ + ((pps)->weighted_bipred_idc == 1 && \ + (slice)->slice_type == V4L2_H264_SLICE_TYPE_B)) + +#define V4L2_CID_STATELESS_H264_PRED_WEIGHTS (V4L2_CID_CODEC_STATELESS_BASE + 5) +/** + * struct v4l2_ctrl_h264_pred_weights - Prediction weight table + * + * Prediction weight table, which matches the syntax specified + * by the H264 specification. + * + * @luma_log2_weight_denom: see H264 specification. + * @chroma_log2_weight_denom: see H264 specification. + * @weight_factors: luma and chroma weight factors. + */ +struct v4l2_ctrl_h264_pred_weights { + __u16 luma_log2_weight_denom; + __u16 chroma_log2_weight_denom; + struct v4l2_h264_weight_factors weight_factors[2]; +}; + +#define V4L2_H264_SLICE_TYPE_P 0 +#define V4L2_H264_SLICE_TYPE_B 1 +#define V4L2_H264_SLICE_TYPE_I 2 +#define V4L2_H264_SLICE_TYPE_SP 3 +#define V4L2_H264_SLICE_TYPE_SI 4 + +#define V4L2_H264_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED 0x01 +#define V4L2_H264_SLICE_FLAG_SP_FOR_SWITCH 0x02 + +#define V4L2_H264_TOP_FIELD_REF 0x1 +#define V4L2_H264_BOTTOM_FIELD_REF 0x2 +#define V4L2_H264_FRAME_REF 0x3 + +/** + * struct v4l2_h264_reference - H264 picture reference + * + * @fields: indicates how the picture is referenced. + * Valid values are V4L2_H264_{}_REF. + * @index: index into v4l2_ctrl_h264_decode_params.dpb[]. + */ +struct v4l2_h264_reference { + __u8 fields; + __u8 index; +}; + +/* + * Maximum DPB size, as specified by section 'A.3.1 Level limits + * common to the Baseline, Main, and Extended profiles'. + */ +#define V4L2_H264_NUM_DPB_ENTRIES 16 +#define V4L2_H264_REF_LIST_LEN (2 * V4L2_H264_NUM_DPB_ENTRIES) + +#define V4L2_CID_STATELESS_H264_SLICE_PARAMS (V4L2_CID_CODEC_STATELESS_BASE + 6) +/** + * struct v4l2_ctrl_h264_slice_params - H264 slice parameters + * + * This structure holds the H264 syntax elements that are specified + * as non-invariant for the slices in a given frame. + * + * Slice invariant syntax elements are contained in struct + * v4l2_ctrl_h264_decode_params. This is done to reduce the API surface + * on frame-based decoders, where slice header parsing is done by the + * hardware. + * + * Slice invariant syntax elements are specified in specification section + * "7.4.3 Slice header semantics". + * + * Except where noted, the members on this struct match the slice header syntax. + * + * @header_bit_size: offset in bits to slice_data() from the beginning of this slice. + * @first_mb_in_slice: see H264 specification. + * @slice_type: see H264 specification. + * @colour_plane_id: see H264 specification. + * @redundant_pic_cnt: see H264 specification. + * @cabac_init_idc: see H264 specification. + * @slice_qp_delta: see H264 specification. + * @slice_qs_delta: see H264 specification. + * @disable_deblocking_filter_idc: see H264 specification. + * @slice_alpha_c0_offset_div2: see H264 specification. + * @slice_beta_offset_div2: see H264 specification. + * @num_ref_idx_l0_active_minus1: see H264 specification. + * @num_ref_idx_l1_active_minus1: see H264 specification. + * @reserved: padding field. Should be zeroed by applications. 
+ * @ref_pic_list0: reference picture list 0 after applying the per-slice modifications. + * @ref_pic_list1: reference picture list 1 after applying the per-slice modifications. + * @flags: see V4L2_H264_SLICE_FLAG_{}. + */ +struct v4l2_ctrl_h264_slice_params { + __u32 header_bit_size; + __u32 first_mb_in_slice; + __u8 slice_type; + __u8 colour_plane_id; + __u8 redundant_pic_cnt; + __u8 cabac_init_idc; + __s8 slice_qp_delta; + __s8 slice_qs_delta; + __u8 disable_deblocking_filter_idc; + __s8 slice_alpha_c0_offset_div2; + __s8 slice_beta_offset_div2; + __u8 num_ref_idx_l0_active_minus1; + __u8 num_ref_idx_l1_active_minus1; + + __u8 reserved; + + struct v4l2_h264_reference ref_pic_list0[V4L2_H264_REF_LIST_LEN]; + struct v4l2_h264_reference ref_pic_list1[V4L2_H264_REF_LIST_LEN]; + + __u32 flags; +}; + +#define V4L2_H264_DPB_ENTRY_FLAG_VALID 0x01 +#define V4L2_H264_DPB_ENTRY_FLAG_ACTIVE 0x02 +#define V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM 0x04 +#define V4L2_H264_DPB_ENTRY_FLAG_FIELD 0x08 + +/** + * struct v4l2_h264_dpb_entry - H264 decoded picture buffer entry + * + * @reference_ts: timestamp of the V4L2 capture buffer to use as reference. + * The timestamp refers to the timestamp field in struct v4l2_buffer. + * Use v4l2_timeval_to_ns() to convert the struct timeval to a __u64. + * @pic_num: matches PicNum variable assigned during the reference + * picture lists construction process. + * @frame_num: frame identifier which matches frame_num syntax element. + * @fields: indicates how the DPB entry is referenced. Valid values are + * V4L2_H264_{}_REF. + * @reserved: padding field. Should be zeroed by applications. + * @top_field_order_cnt: matches TopFieldOrderCnt picture value. + * @bottom_field_order_cnt: matches BottomFieldOrderCnt picture value. + * Note that picture field is indicated by v4l2_buffer.field. + * @flags: see V4L2_H264_DPB_ENTRY_FLAG_{}. + */ +struct v4l2_h264_dpb_entry { + __u64 reference_ts; + __u32 pic_num; + __u16 frame_num; + __u8 fields; + __u8 reserved[5]; + __s32 top_field_order_cnt; + __s32 bottom_field_order_cnt; + __u32 flags; +}; + +#define V4L2_H264_DECODE_PARAM_FLAG_IDR_PIC 0x01 +#define V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC 0x02 +#define V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD 0x04 +#define V4L2_H264_DECODE_PARAM_FLAG_PFRAME 0x08 +#define V4L2_H264_DECODE_PARAM_FLAG_BFRAME 0x10 + +#define V4L2_CID_STATELESS_H264_DECODE_PARAMS (V4L2_CID_CODEC_STATELESS_BASE + 7) +/** + * struct v4l2_ctrl_h264_decode_params - H264 decoding parameters + * + * @dpb: decoded picture buffer. + * @nal_ref_idc: slice header syntax element. + * @frame_num: slice header syntax element. + * @top_field_order_cnt: matches TopFieldOrderCnt picture value. + * @bottom_field_order_cnt: matches BottomFieldOrderCnt picture value. + * Note that picture field is indicated by v4l2_buffer.field. + * @idr_pic_id: slice header syntax element. + * @pic_order_cnt_lsb: slice header syntax element. + * @delta_pic_order_cnt_bottom: slice header syntax element. + * @delta_pic_order_cnt0: slice header syntax element. + * @delta_pic_order_cnt1: slice header syntax element. + * @dec_ref_pic_marking_bit_size: size in bits of dec_ref_pic_marking() + * syntax element. + * @pic_order_cnt_bit_size: size in bits of pic order count syntax. + * @slice_group_change_cycle: slice header syntax element. + * @reserved: padding field. Should be zeroed by applications. + * @flags: see V4L2_H264_DECODE_PARAM_FLAG_{}. 
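+ *
+ * Illustrative sketch (hypothetical 'params' and 'ref_buf' variables):
+ * each DPB entry names a previously decoded CAPTURE buffer by its
+ * timestamp, converted to nanoseconds with the v4l2_timeval_to_ns()
+ * helper from videodev2.h:
+ *
+ *   params.dpb[i].reference_ts = v4l2_timeval_to_ns(&ref_buf.timestamp);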
+ */ +struct v4l2_ctrl_h264_decode_params { + struct v4l2_h264_dpb_entry dpb[V4L2_H264_NUM_DPB_ENTRIES]; + __u16 nal_ref_idc; + __u16 frame_num; + __s32 top_field_order_cnt; + __s32 bottom_field_order_cnt; + __u16 idr_pic_id; + __u16 pic_order_cnt_lsb; + __s32 delta_pic_order_cnt_bottom; + __s32 delta_pic_order_cnt0; + __s32 delta_pic_order_cnt1; + __u32 dec_ref_pic_marking_bit_size; + __u32 pic_order_cnt_bit_size; + __u32 slice_group_change_cycle; + + __u32 reserved; + __u32 flags; +}; + + +/* Stateless FWHT control, used by the vicodec driver */ + +/* Current FWHT version */ +#define V4L2_FWHT_VERSION 3 + +/* Set if this is an interlaced format */ +#define V4L2_FWHT_FL_IS_INTERLACED _BITUL(0) +/* Set if this is a bottom-first (NTSC) interlaced format */ +#define V4L2_FWHT_FL_IS_BOTTOM_FIRST _BITUL(1) +/* Set if each 'frame' contains just one field */ +#define V4L2_FWHT_FL_IS_ALTERNATE _BITUL(2) +/* + * If V4L2_FWHT_FL_IS_ALTERNATE was set, then this is set if this + * 'frame' is the bottom field, else it is the top field. + */ +#define V4L2_FWHT_FL_IS_BOTTOM_FIELD _BITUL(3) +/* Set if the Y' plane is uncompressed */ +#define V4L2_FWHT_FL_LUMA_IS_UNCOMPRESSED _BITUL(4) +/* Set if the Cb plane is uncompressed */ +#define V4L2_FWHT_FL_CB_IS_UNCOMPRESSED _BITUL(5) +/* Set if the Cr plane is uncompressed */ +#define V4L2_FWHT_FL_CR_IS_UNCOMPRESSED _BITUL(6) +/* Set if the chroma plane is full height, if cleared it is half height */ +#define V4L2_FWHT_FL_CHROMA_FULL_HEIGHT _BITUL(7) +/* Set if the chroma plane is full width, if cleared it is half width */ +#define V4L2_FWHT_FL_CHROMA_FULL_WIDTH _BITUL(8) +/* Set if the alpha plane is uncompressed */ +#define V4L2_FWHT_FL_ALPHA_IS_UNCOMPRESSED _BITUL(9) +/* Set if this is an I Frame */ +#define V4L2_FWHT_FL_I_FRAME _BITUL(10) + +/* A 4-values flag - the number of components - 1 */ +#define V4L2_FWHT_FL_COMPONENTS_NUM_MSK GENMASK(18, 16) +#define V4L2_FWHT_FL_COMPONENTS_NUM_OFFSET 16 + +/* A 4-values flag - the pixel encoding type */ +#define V4L2_FWHT_FL_PIXENC_MSK GENMASK(20, 19) +#define V4L2_FWHT_FL_PIXENC_OFFSET 19 +#define V4L2_FWHT_FL_PIXENC_YUV (1 << V4L2_FWHT_FL_PIXENC_OFFSET) +#define V4L2_FWHT_FL_PIXENC_RGB (2 << V4L2_FWHT_FL_PIXENC_OFFSET) +#define V4L2_FWHT_FL_PIXENC_HSV (3 << V4L2_FWHT_FL_PIXENC_OFFSET) + +#define V4L2_CID_STATELESS_FWHT_PARAMS (V4L2_CID_CODEC_STATELESS_BASE + 100) +/** + * struct v4l2_ctrl_fwht_params - FWHT parameters + * + * @backward_ref_ts: timestamp of the V4L2 capture buffer to use as reference. + * The timestamp refers to the timestamp field in struct v4l2_buffer. + * Use v4l2_timeval_to_ns() to convert the struct timeval to a __u64. + * @version: must be V4L2_FWHT_VERSION. + * @width: width of frame. + * @height: height of frame. + * @flags: FWHT flags (see V4L2_FWHT_FL_*). + * @colorspace: the colorspace (enum v4l2_colorspace). + * @xfer_func: the transfer function (enum v4l2_xfer_func). + * @ycbcr_enc: the Y'CbCr encoding (enum v4l2_ycbcr_encoding). + * @quantization: the quantization (enum v4l2_quantization). 
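+ *
+ * Illustrative sketch (hypothetical 'p' variable): the packed fields in
+ * @flags can be unpacked with the masks and offsets defined above:
+ *
+ *   ncomp = ((p.flags & V4L2_FWHT_FL_COMPONENTS_NUM_MSK)
+ *            >> V4L2_FWHT_FL_COMPONENTS_NUM_OFFSET) + 1;
+ *   is_rgb = (p.flags & V4L2_FWHT_FL_PIXENC_MSK) == V4L2_FWHT_FL_PIXENC_RGB;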
+ */ +struct v4l2_ctrl_fwht_params { + __u64 backward_ref_ts; + __u32 version; + __u32 width; + __u32 height; + __u32 flags; + __u32 colorspace; + __u32 xfer_func; + __u32 ycbcr_enc; + __u32 quantization; +}; + +/* Stateless VP8 control */ + +#define V4L2_VP8_SEGMENT_FLAG_ENABLED 0x01 +#define V4L2_VP8_SEGMENT_FLAG_UPDATE_MAP 0x02 +#define V4L2_VP8_SEGMENT_FLAG_UPDATE_FEATURE_DATA 0x04 +#define V4L2_VP8_SEGMENT_FLAG_DELTA_VALUE_MODE 0x08 + +/** + * struct v4l2_vp8_segment - VP8 segment-based adjustments parameters + * + * @quant_update: update values for the segment quantizer. + * @lf_update: update values for the loop filter level. + * @segment_probs: branch probabilities of the segment_id decoding tree. + * @padding: padding field. Should be zeroed by applications. + * @flags: see V4L2_VP8_SEGMENT_FLAG_{}. + * + * This structure contains segment-based adjustments related parameters. + * See the 'update_segmentation()' part of the frame header syntax, + * and section '9.3. Segment-Based Adjustments' of the VP8 specification + * for more details. + */ +struct v4l2_vp8_segment { + __s8 quant_update[4]; + __s8 lf_update[4]; + __u8 segment_probs[3]; + __u8 padding; + __u32 flags; +}; + +#define V4L2_VP8_LF_ADJ_ENABLE 0x01 +#define V4L2_VP8_LF_DELTA_UPDATE 0x02 +#define V4L2_VP8_LF_FILTER_TYPE_SIMPLE 0x04 + +/** + * struct v4l2_vp8_loop_filter - VP8 loop filter parameters + * + * @ref_frm_delta: Reference frame signed delta values. + * @mb_mode_delta: MB prediction mode signed delta values. + * @sharpness_level: matches sharpness_level syntax element. + * @level: matches loop_filter_level syntax element. + * @padding: padding field. Should be zeroed by applications. + * @flags: see V4L2_VP8_LF_{}. + * + * This structure contains loop filter related parameters. + * See the 'mb_lf_adjustments()' part of the frame header syntax, + * and section '9.4. Loop Filter Type and Levels' of the VP8 specification + * for more details. + */ +struct v4l2_vp8_loop_filter { + __s8 ref_frm_delta[4]; + __s8 mb_mode_delta[4]; + __u8 sharpness_level; + __u8 level; + __u16 padding; + __u32 flags; +}; + +/** + * struct v4l2_vp8_quantization - VP8 quantization indices + * + * @y_ac_qi: luma AC coefficient table index. + * @y_dc_delta: luma DC delta value. + * @y2_dc_delta: y2 block DC delta value. + * @y2_ac_delta: y2 block AC delta value. + * @uv_dc_delta: chroma DC delta value. + * @uv_ac_delta: chroma AC delta value. + * @padding: padding field. Should be zeroed by applications. + * + * This structure contains the quantization indices present + * in 'quant_indices()' part of the frame header syntax. + * See section '9.6. Dequantization Indices' of the VP8 specification + * for more details. + */ +struct v4l2_vp8_quantization { + __u8 y_ac_qi; + __s8 y_dc_delta; + __s8 y2_dc_delta; + __s8 y2_ac_delta; + __s8 uv_dc_delta; + __s8 uv_ac_delta; + __u16 padding; +}; + +#define V4L2_VP8_COEFF_PROB_CNT 11 +#define V4L2_VP8_MV_PROB_CNT 19 + +/** + * struct v4l2_vp8_entropy - VP8 update probabilities + * + * @coeff_probs: coefficient probability update values. + * @y_mode_probs: luma intra-prediction probabilities. + * @uv_mode_probs: chroma intra-prediction probabilities. + * @mv_probs: mv decoding probability. + * @padding: padding field. Should be zeroed by applications. + * + * This structure contains the update probabilities present in + * 'token_prob_update()' and 'mv_prob_update()' part of the frame header. + * See section '17.2. Probability Updates' of the VP8 specification + * for more details.
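+ *
+ * Informal note: per the VP8 specification, @coeff_probs is indexed as
+ * coeff_probs[block type][coefficient band][context][probability index].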
+ */ +struct v4l2_vp8_entropy { + __u8 coeff_probs[4][8][3][V4L2_VP8_COEFF_PROB_CNT]; + __u8 y_mode_probs[4]; + __u8 uv_mode_probs[3]; + __u8 mv_probs[2][V4L2_VP8_MV_PROB_CNT]; + __u8 padding[3]; +}; + +/** + * struct v4l2_vp8_entropy_coder_state - VP8 boolean coder state + * + * @range: coder state value for "Range" + * @value: coder state value for "Value" + * @bit_count: number of bits left in range "Value". + * @padding: padding field. Should be zeroed by applications. + * + * This structure contains the state for the boolean coder, as + * explained in section '7. Boolean Entropy Decoder' of the VP8 specification. + */ +struct v4l2_vp8_entropy_coder_state { + __u8 range; + __u8 value; + __u8 bit_count; + __u8 padding; +}; + +#define V4L2_VP8_FRAME_FLAG_KEY_FRAME 0x01 +#define V4L2_VP8_FRAME_FLAG_EXPERIMENTAL 0x02 +#define V4L2_VP8_FRAME_FLAG_SHOW_FRAME 0x04 +#define V4L2_VP8_FRAME_FLAG_MB_NO_SKIP_COEFF 0x08 +#define V4L2_VP8_FRAME_FLAG_SIGN_BIAS_GOLDEN 0x10 +#define V4L2_VP8_FRAME_FLAG_SIGN_BIAS_ALT 0x20 + +#define V4L2_VP8_FRAME_IS_KEY_FRAME(hdr) \ + (!!((hdr)->flags & V4L2_VP8_FRAME_FLAG_KEY_FRAME)) + +#define V4L2_CID_STATELESS_VP8_FRAME (V4L2_CID_CODEC_STATELESS_BASE + 200) +/** + * struct v4l2_ctrl_vp8_frame - VP8 frame parameters + * + * @segment: segmentation parameters. See &v4l2_vp8_segment for more details + * @lf: loop filter parameters. See &v4l2_vp8_loop_filter for more details + * @quant: quantization parameters. See &v4l2_vp8_quantization for more details + * @entropy: update probabilities. See &v4l2_vp8_entropy for more details + * @coder_state: boolean coder state. See &v4l2_vp8_entropy_coder_state for more details + * @width: frame width. + * @height: frame height. + * @horizontal_scale: horizontal scaling factor. + * @vertical_scale: vertical scaling factor. + * @version: bitstream version. + * @prob_skip_false: frame header syntax element. + * @prob_intra: frame header syntax element. + * @prob_last: frame header syntax element. + * @prob_gf: frame header syntax element. + * @num_dct_parts: number of DCT coefficients partitions. + * @first_part_size: size of the first partition, i.e. the control partition. + * @first_part_header_bits: size in bits of the first partition header portion. + * @dct_part_sizes: DCT coefficients sizes. + * @last_frame_ts: "last" reference buffer timestamp. + * The timestamp refers to the timestamp field in struct v4l2_buffer. + * Use v4l2_timeval_to_ns() to convert the struct timeval to a __u64. + * @golden_frame_ts: "golden" reference buffer timestamp. + * @alt_frame_ts: "alt" reference buffer timestamp. + * @flags: see V4L2_VP8_FRAME_FLAG_{}. 
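+ *
+ * Illustrative sketch: the three reference timestamps are meaningful
+ * only for inter frames, which the V4L2_VP8_FRAME_IS_KEY_FRAME()
+ * helper above distinguishes:
+ *
+ *   if (!V4L2_VP8_FRAME_IS_KEY_FRAME(&frame))
+ *       setup_refs(frame.last_frame_ts, frame.golden_frame_ts,
+ *                  frame.alt_frame_ts);
+ *
+ * where setup_refs() stands in for application code resolving the
+ * referenced CAPTURE buffers.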
+ */ +struct v4l2_ctrl_vp8_frame { + struct v4l2_vp8_segment segment; + struct v4l2_vp8_loop_filter lf; + struct v4l2_vp8_quantization quant; + struct v4l2_vp8_entropy entropy; + struct v4l2_vp8_entropy_coder_state coder_state; + + __u16 width; + __u16 height; + + __u8 horizontal_scale; + __u8 vertical_scale; + + __u8 version; + __u8 prob_skip_false; + __u8 prob_intra; + __u8 prob_last; + __u8 prob_gf; + __u8 num_dct_parts; + + __u32 first_part_size; + __u32 first_part_header_bits; + __u32 dct_part_sizes[8]; + + __u64 last_frame_ts; + __u64 golden_frame_ts; + __u64 alt_frame_ts; + + __u64 flags; +}; + +/* Stateless MPEG-2 controls */ + +#define V4L2_MPEG2_SEQ_FLAG_PROGRESSIVE 0x01 + +#define V4L2_CID_STATELESS_MPEG2_SEQUENCE (V4L2_CID_CODEC_STATELESS_BASE+220) +/** + * struct v4l2_ctrl_mpeg2_sequence - MPEG-2 sequence header + * + * All the members on this structure match the sequence header and sequence + * extension syntaxes as specified by the MPEG-2 specification. + * + * Fields horizontal_size, vertical_size and vbv_buffer_size are a + * combination of respective _value and extension syntax elements, + * as described in section 6.3.3 "Sequence header". + * + * @horizontal_size: combination of elements horizontal_size_value and + * horizontal_size_extension. + * @vertical_size: combination of elements vertical_size_value and + * vertical_size_extension. + * @vbv_buffer_size: combination of elements vbv_buffer_size_value and + * vbv_buffer_size_extension. + * @profile_and_level_indication: see MPEG-2 specification. + * @chroma_format: see MPEG-2 specification. + * @flags: see V4L2_MPEG2_SEQ_FLAG_{}. + */ +struct v4l2_ctrl_mpeg2_sequence { + __u16 horizontal_size; + __u16 vertical_size; + __u32 vbv_buffer_size; + __u16 profile_and_level_indication; + __u8 chroma_format; + __u8 flags; +}; + +#define V4L2_MPEG2_PIC_CODING_TYPE_I 1 +#define V4L2_MPEG2_PIC_CODING_TYPE_P 2 +#define V4L2_MPEG2_PIC_CODING_TYPE_B 3 +#define V4L2_MPEG2_PIC_CODING_TYPE_D 4 + +#define V4L2_MPEG2_PIC_TOP_FIELD 0x1 +#define V4L2_MPEG2_PIC_BOTTOM_FIELD 0x2 +#define V4L2_MPEG2_PIC_FRAME 0x3 + +#define V4L2_MPEG2_PIC_FLAG_TOP_FIELD_FIRST 0x0001 +#define V4L2_MPEG2_PIC_FLAG_FRAME_PRED_DCT 0x0002 +#define V4L2_MPEG2_PIC_FLAG_CONCEALMENT_MV 0x0004 +#define V4L2_MPEG2_PIC_FLAG_Q_SCALE_TYPE 0x0008 +#define V4L2_MPEG2_PIC_FLAG_INTRA_VLC 0x0010 +#define V4L2_MPEG2_PIC_FLAG_ALT_SCAN 0x0020 +#define V4L2_MPEG2_PIC_FLAG_REPEAT_FIRST 0x0040 +#define V4L2_MPEG2_PIC_FLAG_PROGRESSIVE 0x0080 + +#define V4L2_CID_STATELESS_MPEG2_PICTURE (V4L2_CID_CODEC_STATELESS_BASE+221) +/** + * struct v4l2_ctrl_mpeg2_picture - MPEG-2 picture header + * + * All the members on this structure match the picture header and picture + * coding extension syntaxes as specified by the MPEG-2 specification. + * + * @backward_ref_ts: timestamp of the V4L2 capture buffer to use as + * reference for backward prediction. + * @forward_ref_ts: timestamp of the V4L2 capture buffer to use as + * reference for forward prediction. These timestamps refer to the + * timestamp field in struct v4l2_buffer. Use v4l2_timeval_to_ns() + * to convert the struct timeval to a __u64. + * @flags: see V4L2_MPEG2_PIC_FLAG_{}. + * @f_code: see MPEG-2 specification. + * @picture_coding_type: see MPEG-2 specification. + * @picture_structure: see V4L2_MPEG2_PIC_{}_FIELD. + * @intra_dc_precision: see MPEG-2 specification. + * @reserved: padding field. Should be zeroed by applications.
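+ *
+ * Informal note: following the MPEG-2 convention, @f_code is indexed as
+ * f_code[s][t], with s selecting forward (0) or backward (1) prediction
+ * and t selecting the horizontal (0) or vertical (1) component.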
+ */ +struct v4l2_ctrl_mpeg2_picture { + __u64 backward_ref_ts; + __u64 forward_ref_ts; + __u32 flags; + __u8 f_code[2][2]; + __u8 picture_coding_type; + __u8 picture_structure; + __u8 intra_dc_precision; + __u8 reserved[5]; +}; + +#define V4L2_CID_STATELESS_MPEG2_QUANTISATION (V4L2_CID_CODEC_STATELESS_BASE+222) +/** + * struct v4l2_ctrl_mpeg2_quantisation - MPEG-2 quantisation + * + * Quantisation matrices as specified by section 6.3.7 + * "Quant matrix extension". + * + * @intra_quantiser_matrix: The quantisation matrix coefficients + * for intra-coded frames, in zigzag scanning order. It is relevant + * for both luma and chroma components, although it can be superseded + * by the chroma-specific matrix for non-4:2:0 YUV formats. + * @non_intra_quantiser_matrix: The quantisation matrix coefficients + * for non-intra-coded frames, in zigzag scanning order. It is relevant + * for both luma and chroma components, although it can be superseded + * by the chroma-specific matrix for non-4:2:0 YUV formats. + * @chroma_intra_quantiser_matrix: The quantisation matrix coefficients + * for the chrominance component of intra-coded frames, in zigzag scanning + * order. Only relevant for 4:2:2 and 4:4:4 YUV formats. + * @chroma_non_intra_quantiser_matrix: The quantisation matrix coefficients + * for the chrominance component of non-intra-coded frames, in zigzag scanning + * order. Only relevant for 4:2:2 and 4:4:4 YUV formats. + */ +struct v4l2_ctrl_mpeg2_quantisation { + __u8 intra_quantiser_matrix[64]; + __u8 non_intra_quantiser_matrix[64]; + __u8 chroma_intra_quantiser_matrix[64]; + __u8 chroma_non_intra_quantiser_matrix[64]; +}; + +#define V4L2_CID_STATELESS_HEVC_SPS (V4L2_CID_CODEC_STATELESS_BASE + 400) +#define V4L2_CID_STATELESS_HEVC_PPS (V4L2_CID_CODEC_STATELESS_BASE + 401) +#define V4L2_CID_STATELESS_HEVC_SLICE_PARAMS (V4L2_CID_CODEC_STATELESS_BASE + 402) +#define V4L2_CID_STATELESS_HEVC_SCALING_MATRIX (V4L2_CID_CODEC_STATELESS_BASE + 403) +#define V4L2_CID_STATELESS_HEVC_DECODE_PARAMS (V4L2_CID_CODEC_STATELESS_BASE + 404) +#define V4L2_CID_STATELESS_HEVC_DECODE_MODE (V4L2_CID_CODEC_STATELESS_BASE + 405) +#define V4L2_CID_STATELESS_HEVC_START_CODE (V4L2_CID_CODEC_STATELESS_BASE + 406) +#define V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS (V4L2_CID_CODEC_STATELESS_BASE + 407) + +enum v4l2_stateless_hevc_decode_mode { + V4L2_STATELESS_HEVC_DECODE_MODE_SLICE_BASED, + V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED, +}; + +enum v4l2_stateless_hevc_start_code { + V4L2_STATELESS_HEVC_START_CODE_NONE, + V4L2_STATELESS_HEVC_START_CODE_ANNEX_B, +}; + +#define V4L2_HEVC_SLICE_TYPE_B 0 +#define V4L2_HEVC_SLICE_TYPE_P 1 +#define V4L2_HEVC_SLICE_TYPE_I 2 + +#define V4L2_HEVC_SPS_FLAG_SEPARATE_COLOUR_PLANE (1ULL << 0) +#define V4L2_HEVC_SPS_FLAG_SCALING_LIST_ENABLED (1ULL << 1) +#define V4L2_HEVC_SPS_FLAG_AMP_ENABLED (1ULL << 2) +#define V4L2_HEVC_SPS_FLAG_SAMPLE_ADAPTIVE_OFFSET (1ULL << 3) +#define V4L2_HEVC_SPS_FLAG_PCM_ENABLED (1ULL << 4) +#define V4L2_HEVC_SPS_FLAG_PCM_LOOP_FILTER_DISABLED (1ULL << 5) +#define V4L2_HEVC_SPS_FLAG_LONG_TERM_REF_PICS_PRESENT (1ULL << 6) +#define V4L2_HEVC_SPS_FLAG_SPS_TEMPORAL_MVP_ENABLED (1ULL << 7) +#define V4L2_HEVC_SPS_FLAG_STRONG_INTRA_SMOOTHING_ENABLED (1ULL << 8) + +/** + * struct v4l2_ctrl_hevc_sps - ITU-T Rec.
H.265: Sequence parameter set + * + * @video_parameter_set_id: specifies the value of the + * vps_video_parameter_set_id of the active VPS + * @seq_parameter_set_id: provides an identifier for the SPS for + * reference by other syntax elements + * @pic_width_in_luma_samples: specifies the width of each decoded picture + * in units of luma samples + * @pic_height_in_luma_samples: specifies the height of each decoded picture + * in units of luma samples + * @bit_depth_luma_minus8: this value plus 8 specifies the bit depth of the + * samples of the luma array + * @bit_depth_chroma_minus8: this value plus 8 specifies the bit depth of the + * samples of the chroma arrays + * @log2_max_pic_order_cnt_lsb_minus4: this value plus 4 specifies the value of + * the variable MaxPicOrderCntLsb + * @sps_max_dec_pic_buffering_minus1: this value plus 1 specifies the maximum + * required size of the decoded picture + * buffer for the codec video sequence + * @sps_max_num_reorder_pics: indicates the maximum allowed number of pictures + * @sps_max_latency_increase_plus1: not equal to 0 is used to compute the + * value of SpsMaxLatencyPictures array + * @log2_min_luma_coding_block_size_minus3: plus 3 specifies the minimum + * luma coding block size + * @log2_diff_max_min_luma_coding_block_size: specifies the difference between + * the maximum and minimum luma + * coding block size + * @log2_min_luma_transform_block_size_minus2: plus 2 specifies the minimum luma + * transform block size + * @log2_diff_max_min_luma_transform_block_size: specifies the difference between + * the maximum and minimum luma + * transform block size + * @max_transform_hierarchy_depth_inter: specifies the maximum hierarchy + * depth for transform units of + * coding units coded in inter + * prediction mode + * @max_transform_hierarchy_depth_intra: specifies the maximum hierarchy + * depth for transform units of + * coding units coded in intra + * prediction mode + * @pcm_sample_bit_depth_luma_minus1: this value plus 1 specifies the number of + * bits used to represent each of PCM sample + * values of the luma component + * @pcm_sample_bit_depth_chroma_minus1: this value plus 1 specifies the number + * of bits used to represent each of PCM + * sample values of the chroma components + * @log2_min_pcm_luma_coding_block_size_minus3: this value plus 3 specifies the + * minimum size of coding blocks + * @log2_diff_max_min_pcm_luma_coding_block_size: specifies the difference between + * the maximum and minimum size of + * coding blocks + * @num_short_term_ref_pic_sets: specifies the number of st_ref_pic_set() + * syntax structures included in the SPS + * @num_long_term_ref_pics_sps: specifies the number of candidate long-term + * reference pictures that are specified in the SPS + * @chroma_format_idc: specifies the chroma sampling + * @sps_max_sub_layers_minus1: this value plus 1 specifies the maximum number + * of temporal sub-layers + * @reserved: padding field. Should be zeroed by applications.
+ * @flags: see V4L2_HEVC_SPS_FLAG_{} + */ +struct v4l2_ctrl_hevc_sps { + __u8 video_parameter_set_id; + __u8 seq_parameter_set_id; + __u16 pic_width_in_luma_samples; + __u16 pic_height_in_luma_samples; + __u8 bit_depth_luma_minus8; + __u8 bit_depth_chroma_minus8; + __u8 log2_max_pic_order_cnt_lsb_minus4; + __u8 sps_max_dec_pic_buffering_minus1; + __u8 sps_max_num_reorder_pics; + __u8 sps_max_latency_increase_plus1; + __u8 log2_min_luma_coding_block_size_minus3; + __u8 log2_diff_max_min_luma_coding_block_size; + __u8 log2_min_luma_transform_block_size_minus2; + __u8 log2_diff_max_min_luma_transform_block_size; + __u8 max_transform_hierarchy_depth_inter; + __u8 max_transform_hierarchy_depth_intra; + __u8 pcm_sample_bit_depth_luma_minus1; + __u8 pcm_sample_bit_depth_chroma_minus1; + __u8 log2_min_pcm_luma_coding_block_size_minus3; + __u8 log2_diff_max_min_pcm_luma_coding_block_size; + __u8 num_short_term_ref_pic_sets; + __u8 num_long_term_ref_pics_sps; + __u8 chroma_format_idc; + __u8 sps_max_sub_layers_minus1; + + __u8 reserved[6]; + __u64 flags; +}; + +#define V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT_ENABLED (1ULL << 0) +#define V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT (1ULL << 1) +#define V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED (1ULL << 2) +#define V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT (1ULL << 3) +#define V4L2_HEVC_PPS_FLAG_CONSTRAINED_INTRA_PRED (1ULL << 4) +#define V4L2_HEVC_PPS_FLAG_TRANSFORM_SKIP_ENABLED (1ULL << 5) +#define V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED (1ULL << 6) +#define V4L2_HEVC_PPS_FLAG_PPS_SLICE_CHROMA_QP_OFFSETS_PRESENT (1ULL << 7) +#define V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED (1ULL << 8) +#define V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED (1ULL << 9) +#define V4L2_HEVC_PPS_FLAG_TRANSQUANT_BYPASS_ENABLED (1ULL << 10) +#define V4L2_HEVC_PPS_FLAG_TILES_ENABLED (1ULL << 11) +#define V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED (1ULL << 12) +#define V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED (1ULL << 13) +#define V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED (1ULL << 14) +#define V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_OVERRIDE_ENABLED (1ULL << 15) +#define V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER (1ULL << 16) +#define V4L2_HEVC_PPS_FLAG_LISTS_MODIFICATION_PRESENT (1ULL << 17) +#define V4L2_HEVC_PPS_FLAG_SLICE_SEGMENT_HEADER_EXTENSION_PRESENT (1ULL << 18) +#define V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT (1ULL << 19) +#define V4L2_HEVC_PPS_FLAG_UNIFORM_SPACING (1ULL << 20) + +/** + * struct v4l2_ctrl_hevc_pps - ITU-T Rec. H.265: Picture parameter set + * + * @pic_parameter_set_id: identifies the PPS for reference by other + * syntax elements + * @num_extra_slice_header_bits: specifies the number of extra slice header + * bits that are present in the slice header RBSP + * for coded pictures referring to the PPS. 
+ * @num_ref_idx_l0_default_active_minus1: this value plus 1 specifies the + * inferred value of num_ref_idx_l0_active_minus1 + * @num_ref_idx_l1_default_active_minus1: this value plus 1 specifies the + * inferred value of num_ref_idx_l1_active_minus1 + * @init_qp_minus26: this value plus 26 specifies the initial value of SliceQpY for + * each slice referring to the PPS + * @diff_cu_qp_delta_depth: specifies the difference between the luma coding + * tree block size and the minimum luma coding block + * size of coding units that convey cu_qp_delta_abs + * and cu_qp_delta_sign_flag + * @pps_cb_qp_offset: specify the offsets to the luma quantization parameter Cb + * @pps_cr_qp_offset: specify the offsets to the luma quantization parameter Cr + * @num_tile_columns_minus1: this value plus 1 specifies the number of tile columns + * partitioning the picture + * @num_tile_rows_minus1: this value plus 1 specifies the number of tile rows partitioning + * the picture + * @column_width_minus1: this value plus 1 specifies the width of each tile column in + * units of coding tree blocks + * @row_height_minus1: this value plus 1 specifies the height of each tile row in + * units of coding tree blocks + * @pps_beta_offset_div2: specify the default deblocking parameter offsets for + * beta divided by 2 + * @pps_tc_offset_div2: specify the default deblocking parameter offsets for tC + * divided by 2 + * @log2_parallel_merge_level_minus2: this value plus 2 specifies the value of + * the variable Log2ParMrgLevel + * @reserved: padding field. Should be zeroed by applications. + * @flags: see V4L2_HEVC_PPS_FLAG_{} + */ +struct v4l2_ctrl_hevc_pps { + __u8 pic_parameter_set_id; + __u8 num_extra_slice_header_bits; + __u8 num_ref_idx_l0_default_active_minus1; + __u8 num_ref_idx_l1_default_active_minus1; + __s8 init_qp_minus26; + __u8 diff_cu_qp_delta_depth; + __s8 pps_cb_qp_offset; + __s8 pps_cr_qp_offset; + __u8 num_tile_columns_minus1; + __u8 num_tile_rows_minus1; + __u8 column_width_minus1[20]; + __u8 row_height_minus1[22]; + __s8 pps_beta_offset_div2; + __s8 pps_tc_offset_div2; + __u8 log2_parallel_merge_level_minus2; + __u8 reserved; + __u64 flags; +}; + +#define V4L2_HEVC_DPB_ENTRY_LONG_TERM_REFERENCE 0x01 + +#define V4L2_HEVC_SEI_PIC_STRUCT_FRAME 0 +#define V4L2_HEVC_SEI_PIC_STRUCT_TOP_FIELD 1 +#define V4L2_HEVC_SEI_PIC_STRUCT_BOTTOM_FIELD 2 +#define V4L2_HEVC_SEI_PIC_STRUCT_TOP_BOTTOM 3 +#define V4L2_HEVC_SEI_PIC_STRUCT_BOTTOM_TOP 4 +#define V4L2_HEVC_SEI_PIC_STRUCT_TOP_BOTTOM_TOP 5 +#define V4L2_HEVC_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM 6 +#define V4L2_HEVC_SEI_PIC_STRUCT_FRAME_DOUBLING 7 +#define V4L2_HEVC_SEI_PIC_STRUCT_FRAME_TRIPLING 8 +#define V4L2_HEVC_SEI_PIC_STRUCT_TOP_PAIRED_PREVIOUS_BOTTOM 9 +#define V4L2_HEVC_SEI_PIC_STRUCT_BOTTOM_PAIRED_PREVIOUS_TOP 10 +#define V4L2_HEVC_SEI_PIC_STRUCT_TOP_PAIRED_NEXT_BOTTOM 11 +#define V4L2_HEVC_SEI_PIC_STRUCT_BOTTOM_PAIRED_NEXT_TOP 12 + +#define V4L2_HEVC_DPB_ENTRIES_NUM_MAX 16 + +/** + * struct v4l2_hevc_dpb_entry - HEVC decoded picture buffer entry + * + * @timestamp: timestamp of the V4L2 capture buffer to use as reference. + * @flags: long term flag for the reference frame + * @field_pic: whether the reference is a field picture or a frame. + * @reserved: padding field. Should be zeroed by applications. + * @pic_order_cnt_val: the picture order count of the current picture.
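+ *
+ * Illustrative sketch (hypothetical 'dpb' array and 'ref_buf' variable),
+ * using the V4L2_HEVC_DPB_ENTRY_LONG_TERM_REFERENCE flag defined above:
+ *
+ *   dpb[i].timestamp = v4l2_timeval_to_ns(&ref_buf.timestamp);
+ *   dpb[i].flags = is_long_term ? V4L2_HEVC_DPB_ENTRY_LONG_TERM_REFERENCE : 0;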
+ */ +struct v4l2_hevc_dpb_entry { + __u64 timestamp; + __u8 flags; + __u8 field_pic; + __u16 reserved; + __s32 pic_order_cnt_val; +}; + +/** + * struct v4l2_hevc_pred_weight_table - HEVC weighted prediction parameters + * + * @delta_luma_weight_l0: the difference of the weighting factor applied + * to the luma prediction value for list 0 + * @luma_offset_l0: the additive offset applied to the luma prediction value + * for list 0 + * @delta_chroma_weight_l0: the difference of the weighting factor applied + * to the chroma prediction values for list 0 + * @chroma_offset_l0: the difference of the additive offset applied to + * the chroma prediction values for list 0 + * @delta_luma_weight_l1: the difference of the weighting factor applied + * to the luma prediction value for list 1 + * @luma_offset_l1: the additive offset applied to the luma prediction value + * for list 1 + * @delta_chroma_weight_l1: the difference of the weighting factor applied + * to the chroma prediction values for list 1 + * @chroma_offset_l1: the difference of the additive offset applied to + * the chroma prediction values for list 1 + * @luma_log2_weight_denom: the base 2 logarithm of the denominator for + * all luma weighting factors + * @delta_chroma_log2_weight_denom: the difference of the base 2 logarithm + * of the denominator for all chroma + * weighting factors + */ +struct v4l2_hevc_pred_weight_table { + __s8 delta_luma_weight_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; + __s8 luma_offset_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; + __s8 delta_chroma_weight_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2]; + __s8 chroma_offset_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2]; + + __s8 delta_luma_weight_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; + __s8 luma_offset_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; + __s8 delta_chroma_weight_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2]; + __s8 chroma_offset_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2]; + + __u8 luma_log2_weight_denom; + __s8 delta_chroma_log2_weight_denom; +}; + +#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_LUMA (1ULL << 0) +#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_CHROMA (1ULL << 1) +#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_TEMPORAL_MVP_ENABLED (1ULL << 2) +#define V4L2_HEVC_SLICE_PARAMS_FLAG_MVD_L1_ZERO (1ULL << 3) +#define V4L2_HEVC_SLICE_PARAMS_FLAG_CABAC_INIT (1ULL << 4) +#define V4L2_HEVC_SLICE_PARAMS_FLAG_COLLOCATED_FROM_L0 (1ULL << 5) +#define V4L2_HEVC_SLICE_PARAMS_FLAG_USE_INTEGER_MV (1ULL << 6) +#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED (1ULL << 7) +#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED (1ULL << 8) +#define V4L2_HEVC_SLICE_PARAMS_FLAG_DEPENDENT_SLICE_SEGMENT (1ULL << 9) + +/** + * struct v4l2_ctrl_hevc_slice_params - HEVC slice parameters + * + * This control is a dynamically sized 1-dimensional array, + * V4L2_CTRL_FLAG_DYNAMIC_ARRAY flag must be set when using it. + * + * @bit_size: size (in bits) of the current slice data + * @data_byte_offset: offset (in bytes) to the video data in the current slice data + * @num_entry_point_offsets: specifies the number of entry point offset syntax + * elements in the slice header. 
+ * @nal_unit_type: specifies the coding type of the slice (B, P or I) + * @nuh_temporal_id_plus1: minus 1 specifies a temporal identifier for the NAL unit + * @slice_type: see V4L2_HEVC_SLICE_TYPE_{} + * @colour_plane_id: specifies the colour plane associated with the current slice + * @slice_pic_order_cnt: specifies the picture order count + * @num_ref_idx_l0_active_minus1: this value plus 1 specifies the maximum + * reference index for reference picture list 0 + * that may be used to decode the slice + * @num_ref_idx_l1_active_minus1: this value plus 1 specifies the maximum + * reference index for reference picture list 1 + * that may be used to decode the slice + * @collocated_ref_idx: specifies the reference index of the collocated picture used + * for temporal motion vector prediction + * @five_minus_max_num_merge_cand: specifies the maximum number of merging + * motion vector prediction candidates supported in + * the slice subtracted from 5 + * @slice_qp_delta: specifies the initial value of QpY to be used for the coding + * blocks in the slice + * @slice_cb_qp_offset: specifies a difference to be added to the value of pps_cb_qp_offset + * @slice_cr_qp_offset: specifies a difference to be added to the value of pps_cr_qp_offset + * @slice_act_y_qp_offset: screen content extension parameters + * @slice_act_cb_qp_offset: screen content extension parameters + * @slice_act_cr_qp_offset: screen content extension parameters + * @slice_beta_offset_div2: specify the deblocking parameter offsets for beta divided by 2 + * @slice_tc_offset_div2: specify the deblocking parameter offsets for tC divided by 2 + * @pic_struct: indicates whether a picture should be displayed as a frame or as one or + * more fields + * @reserved0: padding field. Should be zeroed by applications. + * @slice_segment_addr: specifies the address of the first coding tree block in + * the slice segment + * @ref_idx_l0: the list of L0 reference elements as indices in the DPB + * @ref_idx_l1: the list of L1 reference elements as indices in the DPB + * @short_term_ref_pic_set_size: specifies the size of short-term reference + * pictures set included in the SPS + * @long_term_ref_pic_set_size: specifies the size of long-term reference + * pictures set included in the SPS + * @pred_weight_table: the prediction weight coefficients for inter-picture + * prediction + * @reserved1: padding field. Should be zeroed by applications. + * @flags: see V4L2_HEVC_SLICE_PARAMS_FLAG_{} + */ +struct v4l2_ctrl_hevc_slice_params { + __u32 bit_size; + __u32 data_byte_offset; + __u32 num_entry_point_offsets; + + /* ISO/IEC 23008-2, ITU-T Rec. H.265: NAL unit header */ + __u8 nal_unit_type; + __u8 nuh_temporal_id_plus1; + + /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */ + __u8 slice_type; + __u8 colour_plane_id; + __s32 slice_pic_order_cnt; + __u8 num_ref_idx_l0_active_minus1; + __u8 num_ref_idx_l1_active_minus1; + __u8 collocated_ref_idx; + __u8 five_minus_max_num_merge_cand; + __s8 slice_qp_delta; + __s8 slice_cb_qp_offset; + __s8 slice_cr_qp_offset; + __s8 slice_act_y_qp_offset; + __s8 slice_act_cb_qp_offset; + __s8 slice_act_cr_qp_offset; + __s8 slice_beta_offset_div2; + __s8 slice_tc_offset_div2; + + /* ISO/IEC 23008-2, ITU-T Rec. H.265: Picture timing SEI message */ + __u8 pic_struct; + + __u8 reserved0[3]; + /* ISO/IEC 23008-2, ITU-T Rec.
H.265: General slice segment header */ + __u32 slice_segment_addr; + __u8 ref_idx_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; + __u8 ref_idx_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; + __u16 short_term_ref_pic_set_size; + __u16 long_term_ref_pic_set_size; + + /* ISO/IEC 23008-2, ITU-T Rec. H.265: Weighted prediction parameter */ + struct v4l2_hevc_pred_weight_table pred_weight_table; + + __u8 reserved1[2]; + __u64 flags; +}; + +#define V4L2_HEVC_DECODE_PARAM_FLAG_IRAP_PIC 0x1 +#define V4L2_HEVC_DECODE_PARAM_FLAG_IDR_PIC 0x2 +#define V4L2_HEVC_DECODE_PARAM_FLAG_NO_OUTPUT_OF_PRIOR 0x4 + +/** + * struct v4l2_ctrl_hevc_decode_params - HEVC decode parameters + * + * @pic_order_cnt_val: picture order count + * @short_term_ref_pic_set_size: specifies the size of short-term reference + * pictures set included in the SPS of the first slice + * @long_term_ref_pic_set_size: specifies the size of long-term reference + * pictures set included in the SPS of the first slice + * @num_active_dpb_entries: the number of entries in dpb + * @num_poc_st_curr_before: the number of reference pictures in the short-term + * set that come before the current frame + * @num_poc_st_curr_after: the number of reference pictures in the short-term + * set that come after the current frame + * @num_poc_lt_curr: the number of reference pictures in the long-term set + * @poc_st_curr_before: provides the index of the short term before references + * in DPB array + * @poc_st_curr_after: provides the index of the short term after references + * in DPB array + * @poc_lt_curr: provides the index of the long term references in DPB array + * @num_delta_pocs_of_ref_rps_idx: same as the derived value NumDeltaPocs[RefRpsIdx], + * can be used to parse the RPS data in slice headers + * instead of skipping it with @short_term_ref_pic_set_size. + * @reserved: padding field. Should be zeroed by applications. + * @dpb: the decoded picture buffer, for meta-data about reference frames + * @flags: see V4L2_HEVC_DECODE_PARAM_FLAG_{} + */ +struct v4l2_ctrl_hevc_decode_params { + __s32 pic_order_cnt_val; + __u16 short_term_ref_pic_set_size; + __u16 long_term_ref_pic_set_size; + __u8 num_active_dpb_entries; + __u8 num_poc_st_curr_before; + __u8 num_poc_st_curr_after; + __u8 num_poc_lt_curr; + __u8 poc_st_curr_before[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; + __u8 poc_st_curr_after[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; + __u8 poc_lt_curr[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; + __u8 num_delta_pocs_of_ref_rps_idx; + __u8 reserved[3]; + struct v4l2_hevc_dpb_entry dpb[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; + __u64 flags; +}; + +/** + * struct v4l2_ctrl_hevc_scaling_matrix - HEVC scaling lists parameters + * + * @scaling_list_4x4: scaling list is used for the scaling process for + * transform coefficients. The values on each scaling + * list are expected in raster scan order + * @scaling_list_8x8: scaling list is used for the scaling process for + * transform coefficients. The values on each scaling + * list are expected in raster scan order + * @scaling_list_16x16: scaling list is used for the scaling process for + * transform coefficients. The values on each scaling + * list are expected in raster scan order + * @scaling_list_32x32: scaling list is used for the scaling process for + * transform coefficients. The values on each scaling + * list are expected in raster scan order + * @scaling_list_dc_coef_16x16: scaling list is used for the scaling process + * for transform coefficients. The values on each + * scaling list are expected in raster scan order.
+ * @scaling_list_dc_coef_32x32: scaling list is used for the scaling process + * for transform coefficients. The values on each + * scaling list are expected in raster scan order. + */ +struct v4l2_ctrl_hevc_scaling_matrix { + __u8 scaling_list_4x4[6][16]; + __u8 scaling_list_8x8[6][64]; + __u8 scaling_list_16x16[6][64]; + __u8 scaling_list_32x32[2][64]; + __u8 scaling_list_dc_coef_16x16[6]; + __u8 scaling_list_dc_coef_32x32[2]; +}; + +#define V4L2_CID_COLORIMETRY_CLASS_BASE (V4L2_CTRL_CLASS_COLORIMETRY | 0x900) +#define V4L2_CID_COLORIMETRY_CLASS (V4L2_CTRL_CLASS_COLORIMETRY | 1) + +#define V4L2_CID_COLORIMETRY_HDR10_CLL_INFO (V4L2_CID_COLORIMETRY_CLASS_BASE + 0) + +struct v4l2_ctrl_hdr10_cll_info { + __u16 max_content_light_level; + __u16 max_pic_average_light_level; +}; + +#define V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY (V4L2_CID_COLORIMETRY_CLASS_BASE + 1) + +#define V4L2_HDR10_MASTERING_PRIMARIES_X_LOW 5 +#define V4L2_HDR10_MASTERING_PRIMARIES_X_HIGH 37000 +#define V4L2_HDR10_MASTERING_PRIMARIES_Y_LOW 5 +#define V4L2_HDR10_MASTERING_PRIMARIES_Y_HIGH 42000 +#define V4L2_HDR10_MASTERING_WHITE_POINT_X_LOW 5 +#define V4L2_HDR10_MASTERING_WHITE_POINT_X_HIGH 37000 +#define V4L2_HDR10_MASTERING_WHITE_POINT_Y_LOW 5 +#define V4L2_HDR10_MASTERING_WHITE_POINT_Y_HIGH 42000 +#define V4L2_HDR10_MASTERING_MAX_LUMA_LOW 50000 +#define V4L2_HDR10_MASTERING_MAX_LUMA_HIGH 100000000 +#define V4L2_HDR10_MASTERING_MIN_LUMA_LOW 1 +#define V4L2_HDR10_MASTERING_MIN_LUMA_HIGH 50000 + +struct v4l2_ctrl_hdr10_mastering_display { + __u16 display_primaries_x[3]; + __u16 display_primaries_y[3]; + __u16 white_point_x; + __u16 white_point_y; + __u32 max_display_mastering_luminance; + __u32 min_display_mastering_luminance; +}; + +/* Stateless VP9 controls */ + +#define V4L2_VP9_LOOP_FILTER_FLAG_DELTA_ENABLED 0x1 +#define V4L2_VP9_LOOP_FILTER_FLAG_DELTA_UPDATE 0x2 + +/** + * struct v4l2_vp9_loop_filter - VP9 loop filter parameters + * + * @ref_deltas: contains the adjustment needed for the filter level based on the + * chosen reference frame. If this syntax element is not present in the bitstream, + * users should pass its last value. + * @mode_deltas: contains the adjustment needed for the filter level based on the + * chosen mode. If this syntax element is not present in the bitstream, users should + * pass its last value. + * @level: indicates the loop filter strength. + * @sharpness: indicates the sharpness level. + * @flags: combination of V4L2_VP9_LOOP_FILTER_FLAG_{} flags. + * @reserved: padding field. Should be zeroed by applications. + * + * This structure contains all loop filter related parameters. See sections + * '7.2.8 Loop filter semantics' of the VP9 specification for more details. + */ +struct v4l2_vp9_loop_filter { + __s8 ref_deltas[4]; + __s8 mode_deltas[2]; + __u8 level; + __u8 sharpness; + __u8 flags; + __u8 reserved[7]; +}; + +/** + * struct v4l2_vp9_quantization - VP9 quantization parameters + * + * @base_q_idx: indicates the base frame qindex. + * @delta_q_y_dc: indicates the Y DC quantizer relative to base_q_idx. + * @delta_q_uv_dc: indicates the UV DC quantizer relative to base_q_idx. + * @delta_q_uv_ac: indicates the UV AC quantizer relative to base_q_idx. + * @reserved: padding field. Should be zeroed by applications. + * + * Encodes the quantization parameters. See section '7.2.9 Quantization params + * syntax' of the VP9 specification for more details. 
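+ *
+ * Illustrative note (editor's addition, not upstream kernel-doc): drivers
+ * typically derive the effective per-plane quantizer indices by adding the
+ * deltas to the base and clamping to the valid VP9 qindex range, roughly:
+ *
+ *   y_dc_qindex  = clamp(base_q_idx + delta_q_y_dc,  0, 255);
+ *   uv_dc_qindex = clamp(base_q_idx + delta_q_uv_dc, 0, 255);
+ *   uv_ac_qindex = clamp(base_q_idx + delta_q_uv_ac, 0, 255);
+ *
+ * where clamp() is hypothetical shorthand, not an API of this header.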
+ */
+struct v4l2_vp9_quantization {
+	__u8 base_q_idx;
+	__s8 delta_q_y_dc;
+	__s8 delta_q_uv_dc;
+	__s8 delta_q_uv_ac;
+	__u8 reserved[4];
+};
+
+#define V4L2_VP9_SEGMENTATION_FLAG_ENABLED		0x01
+#define V4L2_VP9_SEGMENTATION_FLAG_UPDATE_MAP		0x02
+#define V4L2_VP9_SEGMENTATION_FLAG_TEMPORAL_UPDATE	0x04
+#define V4L2_VP9_SEGMENTATION_FLAG_UPDATE_DATA		0x08
+#define V4L2_VP9_SEGMENTATION_FLAG_ABS_OR_DELTA_UPDATE	0x10
+
+#define V4L2_VP9_SEG_LVL_ALT_Q		0
+#define V4L2_VP9_SEG_LVL_ALT_L		1
+#define V4L2_VP9_SEG_LVL_REF_FRAME	2
+#define V4L2_VP9_SEG_LVL_SKIP		3
+#define V4L2_VP9_SEG_LVL_MAX		4
+
+#define V4L2_VP9_SEGMENT_FEATURE_ENABLED(id)	(1 << (id))
+#define V4L2_VP9_SEGMENT_FEATURE_ENABLED_MASK	0xf
+
+/**
+ * struct v4l2_vp9_segmentation - VP9 segmentation parameters
+ *
+ * @feature_data: data attached to each feature. Data entry is only valid if
+ * the feature is enabled. The array shall be indexed with segment number as
+ * the first dimension (0..7) and one of V4L2_VP9_SEG_{} as the second dimension.
+ * @feature_enabled: bitmask defining which features are enabled in each segment.
+ * The value for each segment is a combination of V4L2_VP9_SEGMENT_FEATURE_ENABLED(id)
+ * values where id is one of V4L2_VP9_SEG_LVL_{}.
+ * @tree_probs: specifies the probability values to be used when decoding a
+ * Segment-ID. See '5.15. Segmentation map' section of the VP9 specification
+ * for more details.
+ * @pred_probs: specifies the probability values to be used when decoding a
+ * Predicted-Segment-ID. See '6.4.14. Get segment id syntax' section of the
+ * VP9 specification for more details.
+ * @flags: combination of V4L2_VP9_SEGMENTATION_FLAG_{} flags.
+ * @reserved: padding field. Should be zeroed by applications.
+ *
+ * Encodes the segmentation parameters. See section '7.2.10 Segmentation params syntax' of
+ * the VP9 specification for more details.
+ */
+struct v4l2_vp9_segmentation {
+	__s16 feature_data[8][4];
+	__u8 feature_enabled[8];
+	__u8 tree_probs[7];
+	__u8 pred_probs[3];
+	__u8 flags;
+	__u8 reserved[5];
+};
+
+#define V4L2_VP9_FRAME_FLAG_KEY_FRAME			0x001
+#define V4L2_VP9_FRAME_FLAG_SHOW_FRAME			0x002
+#define V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT		0x004
+#define V4L2_VP9_FRAME_FLAG_INTRA_ONLY			0x008
+#define V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV		0x010
+#define V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX		0x020
+#define V4L2_VP9_FRAME_FLAG_PARALLEL_DEC_MODE		0x040
+#define V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING		0x080
+#define V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING		0x100
+#define V4L2_VP9_FRAME_FLAG_COLOR_RANGE_FULL_SWING	0x200
+
+#define V4L2_VP9_SIGN_BIAS_LAST		0x1
+#define V4L2_VP9_SIGN_BIAS_GOLDEN	0x2
+#define V4L2_VP9_SIGN_BIAS_ALT		0x4
+
+#define V4L2_VP9_RESET_FRAME_CTX_NONE	0
+#define V4L2_VP9_RESET_FRAME_CTX_SPEC	1
+#define V4L2_VP9_RESET_FRAME_CTX_ALL	2
+
+#define V4L2_VP9_INTERP_FILTER_EIGHTTAP		0
+#define V4L2_VP9_INTERP_FILTER_EIGHTTAP_SMOOTH	1
+#define V4L2_VP9_INTERP_FILTER_EIGHTTAP_SHARP	2
+#define V4L2_VP9_INTERP_FILTER_BILINEAR		3
+#define V4L2_VP9_INTERP_FILTER_SWITCHABLE	4
+
+#define V4L2_VP9_REFERENCE_MODE_SINGLE_REFERENCE	0
+#define V4L2_VP9_REFERENCE_MODE_COMPOUND_REFERENCE	1
+#define V4L2_VP9_REFERENCE_MODE_SELECT			2
+
+#define V4L2_VP9_PROFILE_MAX	3
+
+#define V4L2_CID_STATELESS_VP9_FRAME	(V4L2_CID_CODEC_STATELESS_BASE + 300)
+/**
+ * struct v4l2_ctrl_vp9_frame - VP9 frame decoding control
+ *
+ * @lf: loop filter parameters. See &v4l2_vp9_loop_filter for more details.
+ * @quant: quantization parameters. See &v4l2_vp9_quantization for more details.
+ * @seg: segmentation parameters. 
See &v4l2_vp9_segmentation for more details. + * @flags: combination of V4L2_VP9_FRAME_FLAG_{} flags. + * @compressed_header_size: compressed header size in bytes. + * @uncompressed_header_size: uncompressed header size in bytes. + * @frame_width_minus_1: add 1 to it and you'll get the frame width expressed in pixels. + * @frame_height_minus_1: add 1 to it and you'll get the frame height expressed in pixels. + * @render_width_minus_1: add 1 to it and you'll get the expected render width expressed in + * pixels. This is not used during the decoding process but might be used by HW scalers + * to prepare a frame that's ready for scanout. + * @render_height_minus_1: add 1 to it and you'll get the expected render height expressed in + * pixels. This is not used during the decoding process but might be used by HW scalers + * to prepare a frame that's ready for scanout. + * @last_frame_ts: "last" reference buffer timestamp. + * The timestamp refers to the timestamp field in struct v4l2_buffer. + * Use v4l2_timeval_to_ns() to convert the struct timeval to a __u64. + * @golden_frame_ts: "golden" reference buffer timestamp. + * The timestamp refers to the timestamp field in struct v4l2_buffer. + * Use v4l2_timeval_to_ns() to convert the struct timeval to a __u64. + * @alt_frame_ts: "alt" reference buffer timestamp. + * The timestamp refers to the timestamp field in struct v4l2_buffer. + * Use v4l2_timeval_to_ns() to convert the struct timeval to a __u64. + * @ref_frame_sign_bias: a bitfield specifying whether the sign bias is set for a given + * reference frame. Either of V4L2_VP9_SIGN_BIAS_{}. + * @reset_frame_context: specifies whether the frame context should be reset to default values. + * Either of V4L2_VP9_RESET_FRAME_CTX_{}. + * @frame_context_idx: frame context that should be used/updated. + * @profile: VP9 profile. Can be 0, 1, 2 or 3. + * @bit_depth: bits per components. Can be 8, 10 or 12. Note that not all profiles support + * 10 and/or 12 bits depths. + * @interpolation_filter: specifies the filter selection used for performing inter prediction. + * Set to one of V4L2_VP9_INTERP_FILTER_{}. + * @tile_cols_log2: specifies the base 2 logarithm of the width of each tile (where the width + * is measured in units of 8x8 blocks). Shall be less than or equal to 6. + * @tile_rows_log2: specifies the base 2 logarithm of the height of each tile (where the height + * is measured in units of 8x8 blocks). + * @reference_mode: specifies the type of inter prediction to be used. + * Set to one of V4L2_VP9_REFERENCE_MODE_{}. + * @reserved: padding field. Should be zeroed by applications. + */ +struct v4l2_ctrl_vp9_frame { + struct v4l2_vp9_loop_filter lf; + struct v4l2_vp9_quantization quant; + struct v4l2_vp9_segmentation seg; + __u32 flags; + __u16 compressed_header_size; + __u16 uncompressed_header_size; + __u16 frame_width_minus_1; + __u16 frame_height_minus_1; + __u16 render_width_minus_1; + __u16 render_height_minus_1; + __u64 last_frame_ts; + __u64 golden_frame_ts; + __u64 alt_frame_ts; + __u8 ref_frame_sign_bias; + __u8 reset_frame_context; + __u8 frame_context_idx; + __u8 profile; + __u8 bit_depth; + __u8 interpolation_filter; + __u8 tile_cols_log2; + __u8 tile_rows_log2; + __u8 reference_mode; + __u8 reserved[7]; +}; + +#define V4L2_VP9_NUM_FRAME_CTX 4 + +/** + * struct v4l2_vp9_mv_probs - VP9 Motion vector probability updates + * @joint: motion vector joint probability updates. + * @sign: motion vector sign probability updates. + * @classes: motion vector class probability updates. 
+ * @class0_bit: motion vector class0 bit probability updates. + * @bits: motion vector bits probability updates. + * @class0_fr: motion vector class0 fractional bit probability updates. + * @fr: motion vector fractional bit probability updates. + * @class0_hp: motion vector class0 high precision fractional bit probability updates. + * @hp: motion vector high precision fractional bit probability updates. + * + * This structure contains new values of motion vector probabilities. + * A value of zero in an array element means there is no update of the relevant probability. + * See `struct v4l2_vp9_prob_updates` for details. + */ +struct v4l2_vp9_mv_probs { + __u8 joint[3]; + __u8 sign[2]; + __u8 classes[2][10]; + __u8 class0_bit[2]; + __u8 bits[2][10]; + __u8 class0_fr[2][2][3]; + __u8 fr[2][3]; + __u8 class0_hp[2]; + __u8 hp[2]; +}; + +#define V4L2_CID_STATELESS_VP9_COMPRESSED_HDR (V4L2_CID_CODEC_STATELESS_BASE + 301) + +#define V4L2_VP9_TX_MODE_ONLY_4X4 0 +#define V4L2_VP9_TX_MODE_ALLOW_8X8 1 +#define V4L2_VP9_TX_MODE_ALLOW_16X16 2 +#define V4L2_VP9_TX_MODE_ALLOW_32X32 3 +#define V4L2_VP9_TX_MODE_SELECT 4 + +/** + * struct v4l2_ctrl_vp9_compressed_hdr - VP9 probability updates control + * @tx_mode: specifies the TX mode. Set to one of V4L2_VP9_TX_MODE_{}. + * @tx8: TX 8x8 probability updates. + * @tx16: TX 16x16 probability updates. + * @tx32: TX 32x32 probability updates. + * @coef: coefficient probability updates. + * @skip: skip probability updates. + * @inter_mode: inter mode probability updates. + * @interp_filter: interpolation filter probability updates. + * @is_inter: is inter-block probability updates. + * @comp_mode: compound prediction mode probability updates. + * @single_ref: single ref probability updates. + * @comp_ref: compound ref probability updates. + * @y_mode: Y prediction mode probability updates. + * @uv_mode: UV prediction mode probability updates. + * @partition: partition probability updates. + * @mv: motion vector probability updates. + * + * This structure holds the probabilities update as parsed in the compressed + * header (Spec 6.3). These values represent the value of probability update after + * being translated with inv_map_table[] (see 6.3.5). A value of zero in an array element + * means that there is no update of the relevant probability. + * + * This control is optional and needs to be used when dealing with the hardware which is + * not capable of parsing the compressed header itself. Only drivers which need it will + * implement it. 
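+ *
+ * Illustrative sketch (editor's addition, not upstream kernel-doc): userspace
+ * typically submits this control together with V4L2_CID_STATELESS_VP9_FRAME,
+ * along the lines of:
+ *
+ *   struct v4l2_ext_control ctrl = {
+ *           .id = V4L2_CID_STATELESS_VP9_COMPRESSED_HDR,
+ *           .size = sizeof(struct v4l2_ctrl_vp9_compressed_hdr),
+ *           .ptr = &hdr,
+ *   };
+ *   struct v4l2_ext_controls ctrls = { .count = 1, .controls = &ctrl };
+ *   ioctl(video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
+ *
+ * hdr and video_fd are hypothetical, and the media-request plumbing that
+ * stateless decoders rely on is omitted for brevity.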
+ */ +struct v4l2_ctrl_vp9_compressed_hdr { + __u8 tx_mode; + __u8 tx8[2][1]; + __u8 tx16[2][2]; + __u8 tx32[2][3]; + __u8 coef[4][2][2][6][6][3]; + __u8 skip[3]; + __u8 inter_mode[7][3]; + __u8 interp_filter[4][2]; + __u8 is_inter[4]; + __u8 comp_mode[5]; + __u8 single_ref[5][2]; + __u8 comp_ref[5]; + __u8 y_mode[4][9]; + __u8 uv_mode[10][9]; + __u8 partition[16][3]; + + struct v4l2_vp9_mv_probs mv; +}; + +/* Stateless AV1 controls */ + +#define V4L2_AV1_TOTAL_REFS_PER_FRAME 8 +#define V4L2_AV1_CDEF_MAX 8 +#define V4L2_AV1_NUM_PLANES_MAX 3 /* 1 if monochrome, 3 otherwise */ +#define V4L2_AV1_MAX_SEGMENTS 8 +#define V4L2_AV1_MAX_OPERATING_POINTS (1 << 5) /* 5 bits to encode */ +#define V4L2_AV1_REFS_PER_FRAME 7 +#define V4L2_AV1_MAX_NUM_Y_POINTS (1 << 4) /* 4 bits to encode */ +#define V4L2_AV1_MAX_NUM_CB_POINTS (1 << 4) /* 4 bits to encode */ +#define V4L2_AV1_MAX_NUM_CR_POINTS (1 << 4) /* 4 bits to encode */ +#define V4L2_AV1_AR_COEFFS_SIZE 25 /* (2 * 3 * (3 + 1)) + 1 */ +#define V4L2_AV1_MAX_NUM_PLANES 3 +#define V4L2_AV1_MAX_TILE_COLS 64 +#define V4L2_AV1_MAX_TILE_ROWS 64 +#define V4L2_AV1_MAX_TILE_COUNT 512 + +#define V4L2_AV1_SEQUENCE_FLAG_STILL_PICTURE 0x00000001 +#define V4L2_AV1_SEQUENCE_FLAG_USE_128X128_SUPERBLOCK 0x00000002 +#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_FILTER_INTRA 0x00000004 +#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTRA_EDGE_FILTER 0x00000008 +#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTERINTRA_COMPOUND 0x00000010 +#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_MASKED_COMPOUND 0x00000020 +#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_WARPED_MOTION 0x00000040 +#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_DUAL_FILTER 0x00000080 +#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_ORDER_HINT 0x00000100 +#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_JNT_COMP 0x00000200 +#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_REF_FRAME_MVS 0x00000400 +#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_SUPERRES 0x00000800 +#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_CDEF 0x00001000 +#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_RESTORATION 0x00002000 +#define V4L2_AV1_SEQUENCE_FLAG_MONO_CHROME 0x00004000 +#define V4L2_AV1_SEQUENCE_FLAG_COLOR_RANGE 0x00008000 +#define V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_X 0x00010000 +#define V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_Y 0x00020000 +#define V4L2_AV1_SEQUENCE_FLAG_FILM_GRAIN_PARAMS_PRESENT 0x00040000 +#define V4L2_AV1_SEQUENCE_FLAG_SEPARATE_UV_DELTA_Q 0x00080000 + +#define V4L2_CID_STATELESS_AV1_SEQUENCE (V4L2_CID_CODEC_STATELESS_BASE + 500) +/** + * struct v4l2_ctrl_av1_sequence - AV1 Sequence + * + * Represents an AV1 Sequence OBU. See section 5.5 "Sequence header OBU syntax" + * for more details. + * + * @flags: See V4L2_AV1_SEQUENCE_FLAG_{}. + * @seq_profile: specifies the features that can be used in the coded video + * sequence. + * @order_hint_bits: specifies the number of bits used for the order_hint field + * at each frame. + * @bit_depth: the bitdepth to use for the sequence as described in section + * 5.5.2 "Color config syntax". + * @reserved: padding field. Should be zeroed by applications. + * @max_frame_width_minus_1: specifies the maximum frame width minus 1 for the + * frames represented by this sequence header. + * @max_frame_height_minus_1: specifies the maximum frame height minus 1 for the + * frames represented by this sequence header. 
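+ *
+ * Illustrative sketch (editor's addition, not upstream kernel-doc): for a
+ * hypothetical 1920x1080, 8-bit, profile-0 sequence this control could be
+ * filled as:
+ *
+ *   struct v4l2_ctrl_av1_sequence seq = {
+ *           .seq_profile = 0,
+ *           .bit_depth = 8,
+ *           .order_hint_bits = 7,
+ *           .max_frame_width_minus_1 = 1919,
+ *           .max_frame_height_minus_1 = 1079,
+ *   };
+ *
+ * with @flags filled from the parsed sequence header OBU.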
+ */ +struct v4l2_ctrl_av1_sequence { + __u32 flags; + __u8 seq_profile; + __u8 order_hint_bits; + __u8 bit_depth; + __u8 reserved; + __u16 max_frame_width_minus_1; + __u16 max_frame_height_minus_1; +}; + +#define V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY (V4L2_CID_CODEC_STATELESS_BASE + 501) +/** + * struct v4l2_ctrl_av1_tile_group_entry - AV1 Tile Group entry + * + * Represents a single AV1 tile inside an AV1 Tile Group. Note that MiRowStart, + * MiRowEnd, MiColStart and MiColEnd can be retrieved from struct + * v4l2_av1_tile_info in struct v4l2_ctrl_av1_frame using tile_row and + * tile_col. See section 6.10.1 "General tile group OBU semantics" for more + * details. + * + * @tile_offset: offset from the OBU data, i.e. where the coded tile data + * actually starts. + * @tile_size: specifies the size in bytes of the coded tile. Equivalent to + * "TileSize" in the AV1 Specification. + * @tile_row: specifies the row of the current tile. Equivalent to "TileRow" in + * the AV1 Specification. + * @tile_col: specifies the col of the current tile. Equivalent to "TileCol" in + * the AV1 Specification. + */ +struct v4l2_ctrl_av1_tile_group_entry { + __u32 tile_offset; + __u32 tile_size; + __u32 tile_row; + __u32 tile_col; +}; + +/** + * enum v4l2_av1_warp_model - AV1 Warp Model as described in section 3 + * "Symbols and abbreviated terms" of the AV1 Specification. + * + * @V4L2_AV1_WARP_MODEL_IDENTITY: Warp model is just an identity transform. + * @V4L2_AV1_WARP_MODEL_TRANSLATION: Warp model is a pure translation. + * @V4L2_AV1_WARP_MODEL_ROTZOOM: Warp model is a rotation + symmetric zoom + + * translation. + * @V4L2_AV1_WARP_MODEL_AFFINE: Warp model is a general affine transform. + */ +enum v4l2_av1_warp_model { + V4L2_AV1_WARP_MODEL_IDENTITY = 0, + V4L2_AV1_WARP_MODEL_TRANSLATION = 1, + V4L2_AV1_WARP_MODEL_ROTZOOM = 2, + V4L2_AV1_WARP_MODEL_AFFINE = 3, +}; + +/** + * enum v4l2_av1_reference_frame - AV1 reference frames + * + * @V4L2_AV1_REF_INTRA_FRAME: Intra Frame Reference + * @V4L2_AV1_REF_LAST_FRAME: Last Reference Frame + * @V4L2_AV1_REF_LAST2_FRAME: Last2 Reference Frame + * @V4L2_AV1_REF_LAST3_FRAME: Last3 Reference Frame + * @V4L2_AV1_REF_GOLDEN_FRAME: Golden Reference Frame + * @V4L2_AV1_REF_BWDREF_FRAME: BWD Reference Frame + * @V4L2_AV1_REF_ALTREF2_FRAME: Alternative2 Reference Frame + * @V4L2_AV1_REF_ALTREF_FRAME: Alternative Reference Frame + */ +enum v4l2_av1_reference_frame { + V4L2_AV1_REF_INTRA_FRAME = 0, + V4L2_AV1_REF_LAST_FRAME = 1, + V4L2_AV1_REF_LAST2_FRAME = 2, + V4L2_AV1_REF_LAST3_FRAME = 3, + V4L2_AV1_REF_GOLDEN_FRAME = 4, + V4L2_AV1_REF_BWDREF_FRAME = 5, + V4L2_AV1_REF_ALTREF2_FRAME = 6, + V4L2_AV1_REF_ALTREF_FRAME = 7, +}; + +#define V4L2_AV1_GLOBAL_MOTION_IS_INVALID(ref) (1 << (ref)) + +#define V4L2_AV1_GLOBAL_MOTION_FLAG_IS_GLOBAL 0x1 +#define V4L2_AV1_GLOBAL_MOTION_FLAG_IS_ROT_ZOOM 0x2 +#define V4L2_AV1_GLOBAL_MOTION_FLAG_IS_TRANSLATION 0x4 +/** + * struct v4l2_av1_global_motion - AV1 Global Motion parameters as described in + * section 6.8.17 "Global motion params semantics" of the AV1 specification. + * + * @flags: A bitfield containing the flags per reference frame. See + * V4L2_AV1_GLOBAL_MOTION_FLAG_{} + * @type: The type of global motion transform used. + * @params: this field has the same meaning as "gm_params" in the AV1 + * specification. + * @invalid: bitfield indicating whether the global motion params are invalid + * for a given reference frame. See section 7.11.3.6 Setup shear process and + * the variable "warpValid". 
Use V4L2_AV1_GLOBAL_MOTION_IS_INVALID(ref) to
+ * create a suitable mask.
+ * @reserved: padding field. Should be zeroed by applications.
+ */
+struct v4l2_av1_global_motion {
+	__u8 flags[V4L2_AV1_TOTAL_REFS_PER_FRAME];
+	enum v4l2_av1_warp_model type[V4L2_AV1_TOTAL_REFS_PER_FRAME];
+	__s32 params[V4L2_AV1_TOTAL_REFS_PER_FRAME][6];
+	__u8 invalid;
+	__u8 reserved[3];
+};
+
+/**
+ * enum v4l2_av1_frame_restoration_type - AV1 Frame Restoration Type
+ * @V4L2_AV1_FRAME_RESTORE_NONE: no filtering is applied.
+ * @V4L2_AV1_FRAME_RESTORE_WIENER: Wiener filter process is invoked.
+ * @V4L2_AV1_FRAME_RESTORE_SGRPROJ: self guided filter process is invoked.
+ * @V4L2_AV1_FRAME_RESTORE_SWITCHABLE: restoration filter is switchable.
+ */
+enum v4l2_av1_frame_restoration_type {
+	V4L2_AV1_FRAME_RESTORE_NONE = 0,
+	V4L2_AV1_FRAME_RESTORE_WIENER = 1,
+	V4L2_AV1_FRAME_RESTORE_SGRPROJ = 2,
+	V4L2_AV1_FRAME_RESTORE_SWITCHABLE = 3,
+};
+
+#define V4L2_AV1_LOOP_RESTORATION_FLAG_USES_LR		0x1
+#define V4L2_AV1_LOOP_RESTORATION_FLAG_USES_CHROMA_LR	0x2
+
+/**
+ * struct v4l2_av1_loop_restoration - AV1 Loop Restoration as described in
+ * section 6.10.15 "Loop restoration params semantics" of the AV1 specification.
+ *
+ * @flags: See V4L2_AV1_LOOP_RESTORATION_FLAG_{}.
+ * @lr_unit_shift: specifies if the luma restoration size should be halved.
+ * @lr_uv_shift: specifies if the chroma size should be half the luma size.
+ * @reserved: padding field. Should be zeroed by applications.
+ * @frame_restoration_type: specifies the type of restoration used for each
+ * plane. See enum v4l2_av1_frame_restoration_type.
+ * @loop_restoration_size: specifies the size of loop restoration units in units
+ * of samples in the current plane.
+ */
+struct v4l2_av1_loop_restoration {
+	__u8 flags;
+	__u8 lr_unit_shift;
+	__u8 lr_uv_shift;
+	__u8 reserved;
+	enum v4l2_av1_frame_restoration_type frame_restoration_type[V4L2_AV1_NUM_PLANES_MAX];
+	__u32 loop_restoration_size[V4L2_AV1_MAX_NUM_PLANES];
+};
+
+/**
+ * struct v4l2_av1_cdef - AV1 CDEF params semantics as described in section
+ * 6.10.14 "CDEF params semantics" of the AV1 specification
+ *
+ * @damping_minus_3: controls the amount of damping in the deringing filter.
+ * @bits: specifies the number of bits needed to specify which CDEF filter to
+ * apply.
+ * @y_pri_strength: specifies the strength of the primary filter.
+ * @y_sec_strength: specifies the strength of the secondary filter.
+ * @uv_pri_strength: specifies the strength of the primary filter.
+ * @uv_sec_strength: specifies the strength of the secondary filter.
+ */
+struct v4l2_av1_cdef {
+	__u8 damping_minus_3;
+	__u8 bits;
+	__u8 y_pri_strength[V4L2_AV1_CDEF_MAX];
+	__u8 y_sec_strength[V4L2_AV1_CDEF_MAX];
+	__u8 uv_pri_strength[V4L2_AV1_CDEF_MAX];
+	__u8 uv_sec_strength[V4L2_AV1_CDEF_MAX];
+};
+
+#define V4L2_AV1_SEGMENTATION_FLAG_ENABLED		0x1
+#define V4L2_AV1_SEGMENTATION_FLAG_UPDATE_MAP		0x2
+#define V4L2_AV1_SEGMENTATION_FLAG_TEMPORAL_UPDATE	0x4
+#define V4L2_AV1_SEGMENTATION_FLAG_UPDATE_DATA		0x8
+#define V4L2_AV1_SEGMENTATION_FLAG_SEG_ID_PRE_SKIP	0x10
+
+/**
+ * enum v4l2_av1_segment_feature - AV1 segment features as described in section
+ * 3 "Symbols and abbreviated terms" of the AV1 specification.
+ *
+ * @V4L2_AV1_SEG_LVL_ALT_Q: Index for quantizer segment feature.
+ * @V4L2_AV1_SEG_LVL_ALT_LF_Y_V: Index for vertical luma loop filter segment
+ * feature.
+ * @V4L2_AV1_SEG_LVL_REF_FRAME: Index for reference frame segment feature.
+ * @V4L2_AV1_SEG_LVL_REF_SKIP: Index for skip segment feature. 
+ * @V4L2_AV1_SEG_LVL_REF_GLOBALMV: Index for global mv feature. + * @V4L2_AV1_SEG_LVL_MAX: Number of segment features. + */ +enum v4l2_av1_segment_feature { + V4L2_AV1_SEG_LVL_ALT_Q = 0, + V4L2_AV1_SEG_LVL_ALT_LF_Y_V = 1, + V4L2_AV1_SEG_LVL_REF_FRAME = 5, + V4L2_AV1_SEG_LVL_REF_SKIP = 6, + V4L2_AV1_SEG_LVL_REF_GLOBALMV = 7, + V4L2_AV1_SEG_LVL_MAX = 8 +}; + +#define V4L2_AV1_SEGMENT_FEATURE_ENABLED(id) (1 << (id)) + +/** + * struct v4l2_av1_segmentation - AV1 Segmentation params as defined in section + * 6.8.13 "Segmentation params semantics" of the AV1 specification. + * + * @flags: see V4L2_AV1_SEGMENTATION_FLAG_{}. + * @last_active_seg_id: indicates the highest numbered segment id that has some + * enabled feature. This is used when decoding the segment id to only decode + * choices corresponding to used segments. + * @feature_enabled: bitmask defining which features are enabled in each + * segment. Use V4L2_AV1_SEGMENT_FEATURE_ENABLED to build a suitable mask. + * @feature_data: data attached to each feature. Data entry is only valid if the + * feature is enabled + */ +struct v4l2_av1_segmentation { + __u8 flags; + __u8 last_active_seg_id; + __u8 feature_enabled[V4L2_AV1_MAX_SEGMENTS]; + __s16 feature_data[V4L2_AV1_MAX_SEGMENTS][V4L2_AV1_SEG_LVL_MAX]; +}; + +#define V4L2_AV1_LOOP_FILTER_FLAG_DELTA_ENABLED 0x1 +#define V4L2_AV1_LOOP_FILTER_FLAG_DELTA_UPDATE 0x2 +#define V4L2_AV1_LOOP_FILTER_FLAG_DELTA_LF_PRESENT 0x4 +#define V4L2_AV1_LOOP_FILTER_FLAG_DELTA_LF_MULTI 0x8 + +/** + * struct v4l2_av1_loop_filter - AV1 Loop filter params as defined in section + * 6.8.10 "Loop filter semantics" and 6.8.16 "Loop filter delta parameters + * semantics" of the AV1 specification. + * + * @flags: see V4L2_AV1_LOOP_FILTER_FLAG_{} + * @level: an array containing loop filter strength values. Different loop + * filter strength values from the array are used depending on the image plane + * being filtered, and the edge direction (vertical or horizontal) being + * filtered. + * @sharpness: indicates the sharpness level. The loop_filter_level and + * loop_filter_sharpness together determine when a block edge is filtered, and + * by how much the filtering can change the sample values. The loop filter + * process is described in section 7.14 of the AV1 specification. + * @ref_deltas: contains the adjustment needed for the filter level based on the + * chosen reference frame. If this syntax element is not present, it maintains + * its previous value. + * @mode_deltas: contains the adjustment needed for the filter level based on + * the chosen mode. If this syntax element is not present, it maintains its + * previous value. + * @delta_lf_res: specifies the left shift which should be applied to decoded + * loop filter delta values. + */ +struct v4l2_av1_loop_filter { + __u8 flags; + __u8 level[4]; + __u8 sharpness; + __s8 ref_deltas[V4L2_AV1_TOTAL_REFS_PER_FRAME]; + __s8 mode_deltas[2]; + __u8 delta_lf_res; +}; + +#define V4L2_AV1_QUANTIZATION_FLAG_DIFF_UV_DELTA 0x1 +#define V4L2_AV1_QUANTIZATION_FLAG_USING_QMATRIX 0x2 +#define V4L2_AV1_QUANTIZATION_FLAG_DELTA_Q_PRESENT 0x4 + +/** + * struct v4l2_av1_quantization - AV1 Quantization params as defined in section + * 6.8.11 "Quantization params semantics" of the AV1 specification. + * + * @flags: see V4L2_AV1_QUANTIZATION_FLAG_{} + * @base_q_idx: indicates the base frame qindex. This is used for Y AC + * coefficients and as the base value for the other quantizers. + * @delta_q_y_dc: indicates the Y DC quantizer relative to base_q_idx. 
+ * @delta_q_u_dc: indicates the U DC quantizer relative to base_q_idx. + * @delta_q_u_ac: indicates the U AC quantizer relative to base_q_idx. + * @delta_q_v_dc: indicates the V DC quantizer relative to base_q_idx. + * @delta_q_v_ac: indicates the V AC quantizer relative to base_q_idx. + * @qm_y: specifies the level in the quantizer matrix that should be used for + * luma plane decoding. + * @qm_u: specifies the level in the quantizer matrix that should be used for + * chroma U plane decoding. + * @qm_v: specifies the level in the quantizer matrix that should be used for + * chroma V plane decoding. + * @delta_q_res: specifies the left shift which should be applied to decoded + * quantizer index delta values. + */ +struct v4l2_av1_quantization { + __u8 flags; + __u8 base_q_idx; + __s8 delta_q_y_dc; + __s8 delta_q_u_dc; + __s8 delta_q_u_ac; + __s8 delta_q_v_dc; + __s8 delta_q_v_ac; + __u8 qm_y; + __u8 qm_u; + __u8 qm_v; + __u8 delta_q_res; +}; + +#define V4L2_AV1_TILE_INFO_FLAG_UNIFORM_TILE_SPACING 0x1 + +/** + * struct v4l2_av1_tile_info - AV1 Tile info as defined in section 6.8.14 "Tile + * info semantics" of the AV1 specification. + * + * @flags: see V4L2_AV1_TILE_INFO_FLAG_{} + * @context_update_tile_id: specifies which tile to use for the CDF update. + * @tile_rows: specifies the number of tiles down the frame. + * @tile_cols: specifies the number of tiles across the frame. + * @mi_col_starts: an array specifying the start column (in units of 4x4 luma + * samples) for each tile across the image. + * @mi_row_starts: an array specifying the start row (in units of 4x4 luma + * samples) for each tile down the image. + * @width_in_sbs_minus_1: specifies the width of a tile minus 1 in units of + * superblocks. + * @height_in_sbs_minus_1: specifies the height of a tile minus 1 in units of + * superblocks. + * @tile_size_bytes: specifies the number of bytes needed to code each tile + * size. + * @reserved: padding field. Should be zeroed by applications. + */ +struct v4l2_av1_tile_info { + __u8 flags; + __u8 context_update_tile_id; + __u8 tile_cols; + __u8 tile_rows; + __u32 mi_col_starts[V4L2_AV1_MAX_TILE_COLS + 1]; + __u32 mi_row_starts[V4L2_AV1_MAX_TILE_ROWS + 1]; + __u32 width_in_sbs_minus_1[V4L2_AV1_MAX_TILE_COLS]; + __u32 height_in_sbs_minus_1[V4L2_AV1_MAX_TILE_ROWS]; + __u8 tile_size_bytes; + __u8 reserved[3]; +}; + +/** + * enum v4l2_av1_frame_type - AV1 Frame Type + * + * @V4L2_AV1_KEY_FRAME: Key frame + * @V4L2_AV1_INTER_FRAME: Inter frame + * @V4L2_AV1_INTRA_ONLY_FRAME: Intra-only frame + * @V4L2_AV1_SWITCH_FRAME: Switch frame + */ +enum v4l2_av1_frame_type { + V4L2_AV1_KEY_FRAME = 0, + V4L2_AV1_INTER_FRAME = 1, + V4L2_AV1_INTRA_ONLY_FRAME = 2, + V4L2_AV1_SWITCH_FRAME = 3 +}; + +/** + * enum v4l2_av1_interpolation_filter - AV1 interpolation filter types + * + * @V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP: eight tap filter + * @V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP_SMOOTH: eight tap smooth filter + * @V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP_SHARP: eight tap sharp filter + * @V4L2_AV1_INTERPOLATION_FILTER_BILINEAR: bilinear filter + * @V4L2_AV1_INTERPOLATION_FILTER_SWITCHABLE: filter selection is signaled at + * the block level + * + * See section 6.8.9 "Interpolation filter semantics" of the AV1 specification + * for more details. 
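+ *
+ * Editor's note (not upstream kernel-doc): the enumerators match the numeric
+ * interp_filter values coded in the AV1 bitstream, so a parser can assign the
+ * parsed syntax element directly, e.g.
+ *
+ *   frame->interpolation_filter =
+ *           (enum v4l2_av1_interpolation_filter)parsed_interp_filter;
+ *
+ * where frame and parsed_interp_filter are hypothetical parser-side names.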
+ */ +enum v4l2_av1_interpolation_filter { + V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP = 0, + V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP_SMOOTH = 1, + V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP_SHARP = 2, + V4L2_AV1_INTERPOLATION_FILTER_BILINEAR = 3, + V4L2_AV1_INTERPOLATION_FILTER_SWITCHABLE = 4, +}; + +/** + * enum v4l2_av1_tx_mode - AV1 Tx mode as described in section 6.8.21 "TX mode + * semantics" of the AV1 specification. + * @V4L2_AV1_TX_MODE_ONLY_4X4: the inverse transform will use only 4x4 + * transforms + * @V4L2_AV1_TX_MODE_LARGEST: the inverse transform will use the largest + * transform size that fits inside the block + * @V4L2_AV1_TX_MODE_SELECT: the choice of transform size is specified + * explicitly for each block. + */ +enum v4l2_av1_tx_mode { + V4L2_AV1_TX_MODE_ONLY_4X4 = 0, + V4L2_AV1_TX_MODE_LARGEST = 1, + V4L2_AV1_TX_MODE_SELECT = 2 +}; + +#define V4L2_AV1_FRAME_FLAG_SHOW_FRAME 0x00000001 +#define V4L2_AV1_FRAME_FLAG_SHOWABLE_FRAME 0x00000002 +#define V4L2_AV1_FRAME_FLAG_ERROR_RESILIENT_MODE 0x00000004 +#define V4L2_AV1_FRAME_FLAG_DISABLE_CDF_UPDATE 0x00000008 +#define V4L2_AV1_FRAME_FLAG_ALLOW_SCREEN_CONTENT_TOOLS 0x00000010 +#define V4L2_AV1_FRAME_FLAG_FORCE_INTEGER_MV 0x00000020 +#define V4L2_AV1_FRAME_FLAG_ALLOW_INTRABC 0x00000040 +#define V4L2_AV1_FRAME_FLAG_USE_SUPERRES 0x00000080 +#define V4L2_AV1_FRAME_FLAG_ALLOW_HIGH_PRECISION_MV 0x00000100 +#define V4L2_AV1_FRAME_FLAG_IS_MOTION_MODE_SWITCHABLE 0x00000200 +#define V4L2_AV1_FRAME_FLAG_USE_REF_FRAME_MVS 0x00000400 +#define V4L2_AV1_FRAME_FLAG_DISABLE_FRAME_END_UPDATE_CDF 0x00000800 +#define V4L2_AV1_FRAME_FLAG_ALLOW_WARPED_MOTION 0x00001000 +#define V4L2_AV1_FRAME_FLAG_REFERENCE_SELECT 0x00002000 +#define V4L2_AV1_FRAME_FLAG_REDUCED_TX_SET 0x00004000 +#define V4L2_AV1_FRAME_FLAG_SKIP_MODE_ALLOWED 0x00008000 +#define V4L2_AV1_FRAME_FLAG_SKIP_MODE_PRESENT 0x00010000 +#define V4L2_AV1_FRAME_FLAG_FRAME_SIZE_OVERRIDE 0x00020000 +#define V4L2_AV1_FRAME_FLAG_BUFFER_REMOVAL_TIME_PRESENT 0x00040000 +#define V4L2_AV1_FRAME_FLAG_FRAME_REFS_SHORT_SIGNALING 0x00080000 + +#define V4L2_CID_STATELESS_AV1_FRAME (V4L2_CID_CODEC_STATELESS_BASE + 502) +/** + * struct v4l2_ctrl_av1_frame - Represents an AV1 Frame Header OBU. + * + * @tile_info: tile info + * @quantization: quantization params + * @segmentation: segmentation params + * @superres_denom: the denominator for the upscaling ratio. + * @loop_filter: loop filter params + * @cdef: cdef params + * @skip_mode_frame: specifies the frames to use for compound prediction when + * skip_mode is equal to 1. + * @primary_ref_frame: specifies which reference frame contains the CDF values + * and other state that should be loaded at the start of the frame. + * @loop_restoration: loop restoration params + * @global_motion: global motion params + * @flags: see V4L2_AV1_FRAME_FLAG_{} + * @frame_type: specifies the AV1 frame type + * @order_hint: specifies OrderHintBits least significant bits of the expected + * output order for this frame. + * @upscaled_width: the upscaled width. + * @interpolation_filter: specifies the filter selection used for performing + * inter prediction. + * @tx_mode: specifies how the transform size is determined. + * @frame_width_minus_1: add 1 to get the frame's width. + * @frame_height_minus_1: add 1 to get the frame's height + * @render_width_minus_1: add 1 to get the render width of the frame in luma + * samples. + * @render_height_minus_1: add 1 to get the render height of the frame in luma + * samples. 
+ * @current_frame_id: specifies the frame id number for the current frame. Frame + * id numbers are additional information that do not affect the decoding + * process, but provide decoders with a way of detecting missing reference + * frames so that appropriate action can be taken. + * @buffer_removal_time: specifies the frame removal time in units of DecCT clock + * ticks counted from the removal time of the last random access point for + * operating point opNum. + * @reserved: padding field. Should be zeroed by applications. + * @order_hints: specifies the expected output order hint for each reference + * frame. This field corresponds to the OrderHints variable from the + * specification (section 5.9.2 "Uncompressed header syntax"). As such, this is + * only used for non-intra frames and ignored otherwise. order_hints[0] is + * always ignored. + * @reference_frame_ts: the V4L2 timestamp of the reference frame slots. + * @ref_frame_idx: used to index into @reference_frame_ts when decoding + * inter-frames. The meaning of this array is the same as in the specification. + * The timestamp refers to the timestamp field in struct v4l2_buffer. Use + * v4l2_timeval_to_ns() to convert the struct timeval to a __u64. + * @refresh_frame_flags: contains a bitmask that specifies which reference frame + * slots will be updated with the current frame after it is decoded. + */ +struct v4l2_ctrl_av1_frame { + struct v4l2_av1_tile_info tile_info; + struct v4l2_av1_quantization quantization; + __u8 superres_denom; + struct v4l2_av1_segmentation segmentation; + struct v4l2_av1_loop_filter loop_filter; + struct v4l2_av1_cdef cdef; + __u8 skip_mode_frame[2]; + __u8 primary_ref_frame; + struct v4l2_av1_loop_restoration loop_restoration; + struct v4l2_av1_global_motion global_motion; + __u32 flags; + enum v4l2_av1_frame_type frame_type; + __u32 order_hint; + __u32 upscaled_width; + enum v4l2_av1_interpolation_filter interpolation_filter; + enum v4l2_av1_tx_mode tx_mode; + __u32 frame_width_minus_1; + __u32 frame_height_minus_1; + __u16 render_width_minus_1; + __u16 render_height_minus_1; + + __u32 current_frame_id; + __u32 buffer_removal_time[V4L2_AV1_MAX_OPERATING_POINTS]; + __u8 reserved[4]; + __u32 order_hints[V4L2_AV1_TOTAL_REFS_PER_FRAME]; + __u64 reference_frame_ts[V4L2_AV1_TOTAL_REFS_PER_FRAME]; + __s8 ref_frame_idx[V4L2_AV1_REFS_PER_FRAME]; + __u8 refresh_frame_flags; +}; + +#define V4L2_AV1_FILM_GRAIN_FLAG_APPLY_GRAIN 0x1 +#define V4L2_AV1_FILM_GRAIN_FLAG_UPDATE_GRAIN 0x2 +#define V4L2_AV1_FILM_GRAIN_FLAG_CHROMA_SCALING_FROM_LUMA 0x4 +#define V4L2_AV1_FILM_GRAIN_FLAG_OVERLAP 0x8 +#define V4L2_AV1_FILM_GRAIN_FLAG_CLIP_TO_RESTRICTED_RANGE 0x10 + +#define V4L2_CID_STATELESS_AV1_FILM_GRAIN (V4L2_CID_CODEC_STATELESS_BASE + 505) +/** + * struct v4l2_ctrl_av1_film_grain - AV1 Film Grain parameters. + * + * Film grain parameters as specified by section 6.8.20 of the AV1 Specification. + * + * @flags: see V4L2_AV1_FILM_GRAIN_{}. + * @cr_mult: represents a multiplier for the cr component used in derivation of + * the input index to the cr component scaling function. + * @grain_seed: specifies the starting value for the pseudo-random numbers used + * during film grain synthesis. + * @film_grain_params_ref_idx: indicates which reference frame contains the + * film grain parameters to be used for this frame. + * @num_y_points: specifies the number of points for the piece-wise linear + * scaling function of the luma component. 
+ * @point_y_value: represents the x (luma value) coordinate for the i-th point
+ * of the piecewise linear scaling function for luma component. The values are
+ * signaled on the scale of 0..255. In case of 10 bit video, these values
+ * correspond to luma values divided by 4. In case of 12 bit video, these values
+ * correspond to luma values divided by 16.
+ * @point_y_scaling: represents the scaling (output) value for the i-th point
+ * of the piecewise linear scaling function for luma component.
+ * @num_cb_points: specifies the number of points for the piece-wise linear
+ * scaling function of the cb component.
+ * @point_cb_value: represents the x coordinate for the i-th point of the
+ * piece-wise linear scaling function for cb component. The values are signaled
+ * on the scale of 0..255.
+ * @point_cb_scaling: represents the scaling (output) value for the i-th point
+ * of the piecewise linear scaling function for cb component.
+ * @num_cr_points: specifies the number of points for the piece-wise
+ * linear scaling function of the cr component.
+ * @point_cr_value: represents the x coordinate for the i-th point of the
+ * piece-wise linear scaling function for cr component. The values are signaled
+ * on the scale of 0..255.
+ * @point_cr_scaling: represents the scaling (output) value for the i-th point
+ * of the piecewise linear scaling function for cr component.
+ * @grain_scaling_minus_8: represents the shift minus 8 applied to the values of the
+ * chroma component. The grain_scaling_minus_8 can take values of 0..3 and
+ * determines the range and quantization step of the standard deviation of film
+ * grain.
+ * @ar_coeff_lag: specifies the number of auto-regressive coefficients for luma
+ * and chroma.
+ * @ar_coeffs_y_plus_128: specifies auto-regressive coefficients used for the Y
+ * plane.
+ * @ar_coeffs_cb_plus_128: specifies auto-regressive coefficients used for the U
+ * plane.
+ * @ar_coeffs_cr_plus_128: specifies auto-regressive coefficients used for the V
+ * plane.
+ * @ar_coeff_shift_minus_6: specifies the range of the auto-regressive
+ * coefficients. Values of 0, 1, 2, and 3 correspond to the ranges for
+ * auto-regressive coefficients of [-2, 2), [-1, 1), [-0.5, 0.5) and [-0.25,
+ * 0.25) respectively.
+ * @grain_scale_shift: specifies how much the Gaussian random numbers should be
+ * scaled down during the grain synthesis process.
+ * @cb_mult: represents a multiplier for the cb component used in derivation of
+ * the input index to the cb component scaling function.
+ * @cb_luma_mult: represents a multiplier for the average luma component used in
+ * derivation of the input index to the cb component scaling function.
+ * @cr_luma_mult: represents a multiplier for the average luma component used in
+ * derivation of the input index to the cr component scaling function.
+ * @cb_offset: represents an offset used in derivation of the input index to the
+ * cb component scaling function.
+ * @cr_offset: represents an offset used in derivation of the input index to the
+ * cr component scaling function.
+ * @reserved: padding field. Should be zeroed by applications.
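+ *
+ * Illustrative note (editor's addition, not upstream kernel-doc): when
+ * V4L2_AV1_FILM_GRAIN_FLAG_APPLY_GRAIN is absent from @flags, no grain is
+ * synthesized and the remaining fields carry no information, so the control
+ * can simply be zero-initialized:
+ *
+ *   struct v4l2_ctrl_av1_film_grain fg = { .flags = 0 };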
+ */
+struct v4l2_ctrl_av1_film_grain {
+	__u8 flags;
+	__u8 cr_mult;
+	__u16 grain_seed;
+	__u8 film_grain_params_ref_idx;
+	__u8 num_y_points;
+	__u8 point_y_value[V4L2_AV1_MAX_NUM_Y_POINTS];
+	__u8 point_y_scaling[V4L2_AV1_MAX_NUM_Y_POINTS];
+	__u8 num_cb_points;
+	__u8 point_cb_value[V4L2_AV1_MAX_NUM_CB_POINTS];
+	__u8 point_cb_scaling[V4L2_AV1_MAX_NUM_CB_POINTS];
+	__u8 num_cr_points;
+	__u8 point_cr_value[V4L2_AV1_MAX_NUM_CR_POINTS];
+	__u8 point_cr_scaling[V4L2_AV1_MAX_NUM_CR_POINTS];
+	__u8 grain_scaling_minus_8;
+	__u8 ar_coeff_lag;
+	__u8 ar_coeffs_y_plus_128[V4L2_AV1_AR_COEFFS_SIZE];
+	__u8 ar_coeffs_cb_plus_128[V4L2_AV1_AR_COEFFS_SIZE];
+	__u8 ar_coeffs_cr_plus_128[V4L2_AV1_AR_COEFFS_SIZE];
+	__u8 ar_coeff_shift_minus_6;
+	__u8 grain_scale_shift;
+	__u8 cb_mult;
+	__u8 cb_luma_mult;
+	__u8 cr_luma_mult;
+	__u16 cb_offset;
+	__u16 cr_offset;
+	__u8 reserved[4];
+};
+
+/* MPEG-compression definitions kept for backwards compatibility */
+#define V4L2_CTRL_CLASS_MPEG		V4L2_CTRL_CLASS_CODEC
+#define V4L2_CID_MPEG_CLASS		V4L2_CID_CODEC_CLASS
+#define V4L2_CID_MPEG_BASE		V4L2_CID_CODEC_BASE
+#define V4L2_CID_MPEG_CX2341X_BASE	V4L2_CID_CODEC_CX2341X_BASE
+#define V4L2_CID_MPEG_MFC51_BASE	V4L2_CID_CODEC_MFC51_BASE
+
+#endif
diff --git a/spider-cam/libcamera/include/linux/v4l2-mediabus.h b/spider-cam/libcamera/include/linux/v4l2-mediabus.h
new file mode 100644
index 0000000..097ef73
--- /dev/null
+++ b/spider-cam/libcamera/include/linux/v4l2-mediabus.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Media Bus API header
+ *
+ * Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ */
+
+#ifndef __LINUX_V4L2_MEDIABUS_H
+#define __LINUX_V4L2_MEDIABUS_H
+
+#include <linux/media-bus-format.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#define V4L2_MBUS_FRAMEFMT_SET_CSC	0x0001
+
+/**
+ * struct v4l2_mbus_framefmt - frame format on the media bus
+ * @width: image width
+ * @height: image height
+ * @code: data format code (from enum v4l2_mbus_pixelcode)
+ * @field: used interlacing type (from enum v4l2_field), zero for metadata
+ *	   mbus codes
+ * @colorspace: colorspace of the data (from enum v4l2_colorspace), zero on
+ *		metadata mbus codes
+ * @ycbcr_enc: YCbCr encoding of the data (from enum v4l2_ycbcr_encoding), zero
+ *	       for metadata mbus codes
+ * @hsv_enc: HSV encoding of the data (from enum v4l2_hsv_encoding), zero for
+ *	     metadata mbus codes
+ * @quantization: quantization of the data (from enum v4l2_quantization), zero
+ *		  for metadata mbus codes
+ * @xfer_func: transfer function of the data (from enum v4l2_xfer_func), zero
+ *	       for metadata mbus codes
+ * @flags: flags (V4L2_MBUS_FRAMEFMT_*)
+ * @reserved: reserved bytes that can be later used
+ */
+struct v4l2_mbus_framefmt {
+	__u32 width;
+	__u32 height;
+	__u32 code;
+	__u32 field;
+	__u32 colorspace;
+	union {
+		/* enum v4l2_ycbcr_encoding */
+		__u16 ycbcr_enc;
+		/* enum v4l2_hsv_encoding */
+		__u16 hsv_enc;
+	};
+	__u16 quantization;
+	__u16 xfer_func;
+	__u16 flags;
+	__u16 reserved[10];
+};
+
+/*
+ * enum v4l2_mbus_pixelcode and its definitions are now deprecated, and
+ * MEDIA_BUS_FMT_ definitions (defined in media-bus-format.h) should be
+ * used instead.
+ *
+ * New defines should only be added to media-bus-format.h. The
+ * v4l2_mbus_pixelcode enum is frozen.
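+ *
+ * For example, V4L2_MBUS_FMT_UYVY8_2X8 below expands, via the
+ * V4L2_MBUS_FROM_MEDIA_BUS_FMT() helper, to the value of
+ * MEDIA_BUS_FMT_UYVY8_2X8; new code should spell out the MEDIA_BUS_FMT_
+ * name directly. (Editor's illustration, not upstream text.)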
+ */ + +#define V4L2_MBUS_FROM_MEDIA_BUS_FMT(name) \ + V4L2_MBUS_FMT_ ## name = MEDIA_BUS_FMT_ ## name + +enum v4l2_mbus_pixelcode { + V4L2_MBUS_FROM_MEDIA_BUS_FMT(FIXED), + + V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB444_2X8_PADHI_BE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB444_2X8_PADHI_LE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB555_2X8_PADHI_BE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB555_2X8_PADHI_LE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(BGR565_2X8_BE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(BGR565_2X8_LE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB565_2X8_BE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB565_2X8_LE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB666_1X18), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB888_1X24), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB888_2X12_BE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB888_2X12_LE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(ARGB8888_1X32), + + V4L2_MBUS_FROM_MEDIA_BUS_FMT(Y8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(UV8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY8_1_5X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY8_1_5X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV8_1_5X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU8_1_5X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY8_2X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY8_2X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV8_2X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU8_2X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(Y10_1X10), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY10_2X10), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY10_2X10), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV10_2X10), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU10_2X10), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(Y12_1X12), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY8_1X16), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY8_1X16), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV8_1X16), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU8_1X16), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YDYUYDYV8_1X16), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY10_1X20), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY10_1X20), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV10_1X20), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU10_1X20), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUV10_1X30), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(AYUV8_1X32), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY12_2X12), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY12_2X12), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV12_2X12), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU12_2X12), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY12_1X24), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY12_1X24), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV12_1X24), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU12_1X24), + + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGRBG8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SRGGB8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_ALAW8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG10_ALAW8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGRBG10_ALAW8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SRGGB10_ALAW8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_DPCM8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG10_DPCM8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGRBG10_DPCM8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SRGGB10_DPCM8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_2X8_PADHI_BE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_2X8_PADHI_LE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_2X8_PADLO_BE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_2X8_PADLO_LE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_1X10), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG10_1X10), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGRBG10_1X10), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SRGGB10_1X10), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR12_1X12), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG12_1X12), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGRBG12_1X12), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SRGGB12_1X12), + + 
V4L2_MBUS_FROM_MEDIA_BUS_FMT(JPEG_1X8),
+
+	V4L2_MBUS_FROM_MEDIA_BUS_FMT(S5C_UYVY_JPEG_1X8),
+
+	V4L2_MBUS_FROM_MEDIA_BUS_FMT(AHSV8888_1X32),
+};
+
+#endif
diff --git a/spider-cam/libcamera/include/linux/v4l2-subdev.h b/spider-cam/libcamera/include/linux/v4l2-subdev.h
new file mode 100644
index 0000000..2347e26
--- /dev/null
+++ b/spider-cam/libcamera/include/linux/v4l2-subdev.h
@@ -0,0 +1,304 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * V4L2 subdev userspace API
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *	     Sakari Ailus <sakari.ailus@iki.fi>
+ */
+
+#ifndef __LINUX_V4L2_SUBDEV_H
+#define __LINUX_V4L2_SUBDEV_H
+
+#include <linux/const.h>
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include <linux/v4l2-common.h>
+#include <linux/v4l2-mediabus.h>
+
+/**
+ * enum v4l2_subdev_format_whence - Media bus format type
+ * @V4L2_SUBDEV_FORMAT_TRY: try format, for negotiation only
+ * @V4L2_SUBDEV_FORMAT_ACTIVE: active format, applied to the device
+ */
+enum v4l2_subdev_format_whence {
+	V4L2_SUBDEV_FORMAT_TRY = 0,
+	V4L2_SUBDEV_FORMAT_ACTIVE = 1,
+};
+
+/**
+ * struct v4l2_subdev_format - Pad-level media bus format
+ * @which: format type (from enum v4l2_subdev_format_whence)
+ * @pad: pad number, as reported by the media API
+ * @format: media bus format (format code and frame size)
+ * @stream: stream number, defined in subdev routing
+ * @reserved: drivers and applications must zero this array
+ */
+struct v4l2_subdev_format {
+	__u32 which;
+	__u32 pad;
+	struct v4l2_mbus_framefmt format;
+	__u32 stream;
+	__u32 reserved[7];
+};
+
+/**
+ * struct v4l2_subdev_crop - Pad-level crop settings
+ * @which: format type (from enum v4l2_subdev_format_whence)
+ * @pad: pad number, as reported by the media API
+ * @rect: pad crop rectangle boundaries
+ * @stream: stream number, defined in subdev routing
+ * @reserved: drivers and applications must zero this array
+ *
+ * The subdev crop API is an obsolete interface and may be removed in the
+ * future. It is superseded by the selection API. No new extensions to this
+ * structure will be accepted.
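+ *
+ * Editor's illustration (not upstream kernel-doc): new applications should
+ * use the selection API instead, along the lines of:
+ *
+ *   struct v4l2_subdev_selection sel = {
+ *           .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ *           .pad = 0,
+ *           .target = V4L2_SEL_TGT_CROP,
+ *           .r = { .left = 0, .top = 0, .width = 640, .height = 480 },
+ *   };
+ *   ioctl(subdev_fd, VIDIOC_SUBDEV_S_SELECTION, &sel);
+ *
+ * where subdev_fd and the 640x480 rectangle are hypothetical.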
+ */ +struct v4l2_subdev_crop { + __u32 which; + __u32 pad; + struct v4l2_rect rect; + __u32 stream; + __u32 reserved[7]; +}; + +#define V4L2_SUBDEV_MBUS_CODE_CSC_COLORSPACE 0x00000001 +#define V4L2_SUBDEV_MBUS_CODE_CSC_XFER_FUNC 0x00000002 +#define V4L2_SUBDEV_MBUS_CODE_CSC_YCBCR_ENC 0x00000004 +#define V4L2_SUBDEV_MBUS_CODE_CSC_HSV_ENC V4L2_SUBDEV_MBUS_CODE_CSC_YCBCR_ENC +#define V4L2_SUBDEV_MBUS_CODE_CSC_QUANTIZATION 0x00000008 + +/** + * struct v4l2_subdev_mbus_code_enum - Media bus format enumeration + * @pad: pad number, as reported by the media API + * @index: format index during enumeration + * @code: format code (MEDIA_BUS_FMT_ definitions) + * @which: format type (from enum v4l2_subdev_format_whence) + * @flags: flags set by the driver, (V4L2_SUBDEV_MBUS_CODE_*) + * @stream: stream number, defined in subdev routing + * @reserved: drivers and applications must zero this array + */ +struct v4l2_subdev_mbus_code_enum { + __u32 pad; + __u32 index; + __u32 code; + __u32 which; + __u32 flags; + __u32 stream; + __u32 reserved[6]; +}; + +/** + * struct v4l2_subdev_frame_size_enum - Media bus format enumeration + * @index: format index during enumeration + * @pad: pad number, as reported by the media API + * @code: format code (MEDIA_BUS_FMT_ definitions) + * @min_width: minimum frame width, in pixels + * @max_width: maximum frame width, in pixels + * @min_height: minimum frame height, in pixels + * @max_height: maximum frame height, in pixels + * @which: format type (from enum v4l2_subdev_format_whence) + * @stream: stream number, defined in subdev routing + * @reserved: drivers and applications must zero this array + */ +struct v4l2_subdev_frame_size_enum { + __u32 index; + __u32 pad; + __u32 code; + __u32 min_width; + __u32 max_width; + __u32 min_height; + __u32 max_height; + __u32 which; + __u32 stream; + __u32 reserved[7]; +}; + +/** + * struct v4l2_subdev_frame_interval - Pad-level frame rate + * @pad: pad number, as reported by the media API + * @interval: frame interval in seconds + * @stream: stream number, defined in subdev routing + * @which: interval type (from enum v4l2_subdev_format_whence) + * @reserved: drivers and applications must zero this array + */ +struct v4l2_subdev_frame_interval { + __u32 pad; + struct v4l2_fract interval; + __u32 stream; + __u32 which; + __u32 reserved[7]; +}; + +/** + * struct v4l2_subdev_frame_interval_enum - Frame interval enumeration + * @pad: pad number, as reported by the media API + * @index: frame interval index during enumeration + * @code: format code (MEDIA_BUS_FMT_ definitions) + * @width: frame width in pixels + * @height: frame height in pixels + * @interval: frame interval in seconds + * @which: interval type (from enum v4l2_subdev_format_whence) + * @stream: stream number, defined in subdev routing + * @reserved: drivers and applications must zero this array + */ +struct v4l2_subdev_frame_interval_enum { + __u32 index; + __u32 pad; + __u32 code; + __u32 width; + __u32 height; + struct v4l2_fract interval; + __u32 which; + __u32 stream; + __u32 reserved[7]; +}; + +/** + * struct v4l2_subdev_selection - selection info + * + * @which: either V4L2_SUBDEV_FORMAT_ACTIVE or V4L2_SUBDEV_FORMAT_TRY + * @pad: pad number, as reported by the media API + * @target: Selection target, used to choose one of possible rectangles, + * defined in v4l2-common.h; V4L2_SEL_TGT_* . + * @flags: constraint flags, defined in v4l2-common.h; V4L2_SEL_FLAG_*. 
+ * @r: coordinates of the selection window
+ * @stream: stream number, defined in subdev routing
+ * @reserved: for future use, set to zero for now
+ *
+ * Hardware may use multiple helper windows to process a video stream.
+ * The structure is used to exchange these selection areas between
+ * an application and a driver.
+ */
+struct v4l2_subdev_selection {
+	__u32 which;
+	__u32 pad;
+	__u32 target;
+	__u32 flags;
+	struct v4l2_rect r;
+	__u32 stream;
+	__u32 reserved[7];
+};
+
+/**
+ * struct v4l2_subdev_capability - subdev capabilities
+ * @version: the driver versioning number
+ * @capabilities: the subdev capabilities, see V4L2_SUBDEV_CAP_*
+ * @reserved: for future use, set to zero for now
+ */
+struct v4l2_subdev_capability {
+	__u32 version;
+	__u32 capabilities;
+	__u32 reserved[14];
+};
+
+/* The v4l2 sub-device video device node is registered in read-only mode. */
+#define V4L2_SUBDEV_CAP_RO_SUBDEV		0x00000001
+
+/* The v4l2 sub-device supports routing and multiplexed streams. */
+#define V4L2_SUBDEV_CAP_STREAMS			0x00000002
+
+/*
+ * Is the route active? An active route will start when streaming is enabled
+ * on a video node.
+ */
+#define V4L2_SUBDEV_ROUTE_FL_ACTIVE		(1U << 0)
+
+/**
+ * struct v4l2_subdev_route - A route inside a subdev
+ *
+ * @sink_pad: the sink pad index
+ * @sink_stream: the sink stream identifier
+ * @source_pad: the source pad index
+ * @source_stream: the source stream identifier
+ * @flags: route flags V4L2_SUBDEV_ROUTE_FL_*
+ * @reserved: drivers and applications must zero this array
+ */
+struct v4l2_subdev_route {
+	__u32 sink_pad;
+	__u32 sink_stream;
+	__u32 source_pad;
+	__u32 source_stream;
+	__u32 flags;
+	__u32 reserved[5];
+};
+
+/**
+ * struct v4l2_subdev_routing - Subdev routing information
+ *
+ * @which: configuration type (from enum v4l2_subdev_format_whence)
+ * @len_routes: the length of the routes array, in routes; set by the user, not
+ *		modified by the kernel
+ * @routes: pointer to the routes array
+ * @num_routes: the total number of routes, possibly more than fits in the
+ *		routes array
+ * @reserved: drivers and applications must zero this array
+ */
+struct v4l2_subdev_routing {
+	__u32 which;
+	__u32 len_routes;
+	__u64 routes;
+	__u32 num_routes;
+	__u32 reserved[11];
+};
+
+/*
+ * The client is aware of streams. Setting this flag enables the use of 'stream'
+ * fields (referring to the stream number) with various ioctls. If this is not
+ * set (which is the default), the 'stream' fields will be forced to 0 by the
+ * kernel.
+ */
+#define V4L2_SUBDEV_CLIENT_CAP_STREAMS			(1ULL << 0)
+
+/*
+ * The client is aware of the struct v4l2_subdev_frame_interval which field. If
+ * this is not set (which is the default), the which field is forced to
+ * V4L2_SUBDEV_FORMAT_ACTIVE by the kernel.
+ */
+#define V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH	(1ULL << 1)
+
+/**
+ * struct v4l2_subdev_client_capability - Capabilities of the client accessing
+ *					  the subdev
+ *
+ * @capabilities: A bitmask of V4L2_SUBDEV_CLIENT_CAP_* flags.
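+ *
+ * Illustrative sketch (editor's addition, not upstream kernel-doc): a client
+ * that understands multiplexed streams would announce that before touching
+ * any 'stream' fields, roughly:
+ *
+ *   struct v4l2_subdev_client_capability cap = {
+ *           .capabilities = V4L2_SUBDEV_CLIENT_CAP_STREAMS,
+ *   };
+ *   ioctl(subdev_fd, VIDIOC_SUBDEV_S_CLIENT_CAP, &cap);
+ *
+ * subdev_fd is hypothetical; the ioctl itself is defined below.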
+ */ +struct v4l2_subdev_client_capability { + __u64 capabilities; +}; + +/* Backwards compatibility define --- to be removed */ +#define v4l2_subdev_edid v4l2_edid + +#define VIDIOC_SUBDEV_QUERYCAP _IOR('V', 0, struct v4l2_subdev_capability) +#define VIDIOC_SUBDEV_G_FMT _IOWR('V', 4, struct v4l2_subdev_format) +#define VIDIOC_SUBDEV_S_FMT _IOWR('V', 5, struct v4l2_subdev_format) +#define VIDIOC_SUBDEV_G_FRAME_INTERVAL _IOWR('V', 21, struct v4l2_subdev_frame_interval) +#define VIDIOC_SUBDEV_S_FRAME_INTERVAL _IOWR('V', 22, struct v4l2_subdev_frame_interval) +#define VIDIOC_SUBDEV_ENUM_MBUS_CODE _IOWR('V', 2, struct v4l2_subdev_mbus_code_enum) +#define VIDIOC_SUBDEV_ENUM_FRAME_SIZE _IOWR('V', 74, struct v4l2_subdev_frame_size_enum) +#define VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL _IOWR('V', 75, struct v4l2_subdev_frame_interval_enum) +#define VIDIOC_SUBDEV_G_CROP _IOWR('V', 59, struct v4l2_subdev_crop) +#define VIDIOC_SUBDEV_S_CROP _IOWR('V', 60, struct v4l2_subdev_crop) +#define VIDIOC_SUBDEV_G_SELECTION _IOWR('V', 61, struct v4l2_subdev_selection) +#define VIDIOC_SUBDEV_S_SELECTION _IOWR('V', 62, struct v4l2_subdev_selection) +#define VIDIOC_SUBDEV_G_ROUTING _IOWR('V', 38, struct v4l2_subdev_routing) +#define VIDIOC_SUBDEV_S_ROUTING _IOWR('V', 39, struct v4l2_subdev_routing) +#define VIDIOC_SUBDEV_G_CLIENT_CAP _IOR('V', 101, struct v4l2_subdev_client_capability) +#define VIDIOC_SUBDEV_S_CLIENT_CAP _IOWR('V', 102, struct v4l2_subdev_client_capability) + +/* The following ioctls are identical to the ioctls in videodev2.h */ +#define VIDIOC_SUBDEV_G_STD _IOR('V', 23, v4l2_std_id) +#define VIDIOC_SUBDEV_S_STD _IOW('V', 24, v4l2_std_id) +#define VIDIOC_SUBDEV_ENUMSTD _IOWR('V', 25, struct v4l2_standard) +#define VIDIOC_SUBDEV_G_EDID _IOWR('V', 40, struct v4l2_edid) +#define VIDIOC_SUBDEV_S_EDID _IOWR('V', 41, struct v4l2_edid) +#define VIDIOC_SUBDEV_QUERYSTD _IOR('V', 63, v4l2_std_id) +#define VIDIOC_SUBDEV_S_DV_TIMINGS _IOWR('V', 87, struct v4l2_dv_timings) +#define VIDIOC_SUBDEV_G_DV_TIMINGS _IOWR('V', 88, struct v4l2_dv_timings) +#define VIDIOC_SUBDEV_ENUM_DV_TIMINGS _IOWR('V', 98, struct v4l2_enum_dv_timings) +#define VIDIOC_SUBDEV_QUERY_DV_TIMINGS _IOR('V', 99, struct v4l2_dv_timings) +#define VIDIOC_SUBDEV_DV_TIMINGS_CAP _IOWR('V', 100, struct v4l2_dv_timings_cap) + +#endif diff --git a/spider-cam/libcamera/include/linux/videodev2.h b/spider-cam/libcamera/include/linux/videodev2.h new file mode 100644 index 0000000..7fe522e --- /dev/null +++ b/spider-cam/libcamera/include/linux/videodev2.h @@ -0,0 +1,2735 @@ +/* SPDX-License-Identifier: ((GPL-2.0+ WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* + * Video for Linux Two header file + * + * Copyright (C) 1999-2012 the contributors + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Alternatively you can redistribute this file under the terms of the + * BSD license as stated below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ * 3. The names of its contributors may not be used to endorse or promote
+ *    products derived from this software without specific prior written
+ *    permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Header file for v4l or V4L2 drivers and applications
+ * with public API.
+ * All kernel-specific stuff were moved to media/v4l2-dev.h, so
+ * no #if __KERNEL tests are allowed here
+ *
+ * See https://linuxtv.org for more info
+ *
+ * Author: Bill Dirks
+ *         Justin Schoeman
+ *         Hans Verkuil
+ *         et al.
+ */
+#ifndef __LINUX_VIDEODEV2_H
+#define __LINUX_VIDEODEV2_H
+
+#include <sys/time.h>
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include <linux/v4l2-common.h>
+#include <linux/v4l2-controls.h>
+
+/*
+ * Common stuff for both V4L1 and V4L2
+ * Moved from videodev.h
+ */
+#define VIDEO_MAX_FRAME 32
+#define VIDEO_MAX_PLANES 8
+
+/*
+ * M I S C E L L A N E O U S
+ */
+
+/* Four-character-code (FOURCC) */
+#define v4l2_fourcc(a, b, c, d)\
+	((__u32)(a) | ((__u32)(b) << 8) | ((__u32)(c) << 16) | ((__u32)(d) << 24))
+#define v4l2_fourcc_be(a, b, c, d)	(v4l2_fourcc(a, b, c, d) | (1U << 31))
+
+/*
+ * E N U M S
+ */
+enum v4l2_field {
+	V4L2_FIELD_ANY = 0, /* driver can choose from none,
+				top, bottom, interlaced
+				depending on whatever it thinks
+				is approximate ... */
+	V4L2_FIELD_NONE = 1, /* this device has no fields ...
*/ + V4L2_FIELD_TOP = 2, /* top field only */ + V4L2_FIELD_BOTTOM = 3, /* bottom field only */ + V4L2_FIELD_INTERLACED = 4, /* both fields interlaced */ + V4L2_FIELD_SEQ_TB = 5, /* both fields sequential into one + buffer, top-bottom order */ + V4L2_FIELD_SEQ_BT = 6, /* same as above + bottom-top order */ + V4L2_FIELD_ALTERNATE = 7, /* both fields alternating into + separate buffers */ + V4L2_FIELD_INTERLACED_TB = 8, /* both fields interlaced, top field + first and the top field is + transmitted first */ + V4L2_FIELD_INTERLACED_BT = 9, /* both fields interlaced, top field + first and the bottom field is + transmitted first */ +}; +#define V4L2_FIELD_HAS_TOP(field) \ + ((field) == V4L2_FIELD_TOP ||\ + (field) == V4L2_FIELD_INTERLACED ||\ + (field) == V4L2_FIELD_INTERLACED_TB ||\ + (field) == V4L2_FIELD_INTERLACED_BT ||\ + (field) == V4L2_FIELD_SEQ_TB ||\ + (field) == V4L2_FIELD_SEQ_BT) +#define V4L2_FIELD_HAS_BOTTOM(field) \ + ((field) == V4L2_FIELD_BOTTOM ||\ + (field) == V4L2_FIELD_INTERLACED ||\ + (field) == V4L2_FIELD_INTERLACED_TB ||\ + (field) == V4L2_FIELD_INTERLACED_BT ||\ + (field) == V4L2_FIELD_SEQ_TB ||\ + (field) == V4L2_FIELD_SEQ_BT) +#define V4L2_FIELD_HAS_BOTH(field) \ + ((field) == V4L2_FIELD_INTERLACED ||\ + (field) == V4L2_FIELD_INTERLACED_TB ||\ + (field) == V4L2_FIELD_INTERLACED_BT ||\ + (field) == V4L2_FIELD_SEQ_TB ||\ + (field) == V4L2_FIELD_SEQ_BT) +#define V4L2_FIELD_HAS_T_OR_B(field) \ + ((field) == V4L2_FIELD_BOTTOM ||\ + (field) == V4L2_FIELD_TOP ||\ + (field) == V4L2_FIELD_ALTERNATE) +#define V4L2_FIELD_IS_INTERLACED(field) \ + ((field) == V4L2_FIELD_INTERLACED ||\ + (field) == V4L2_FIELD_INTERLACED_TB ||\ + (field) == V4L2_FIELD_INTERLACED_BT) +#define V4L2_FIELD_IS_SEQUENTIAL(field) \ + ((field) == V4L2_FIELD_SEQ_TB ||\ + (field) == V4L2_FIELD_SEQ_BT) + +enum v4l2_buf_type { + V4L2_BUF_TYPE_VIDEO_CAPTURE = 1, + V4L2_BUF_TYPE_VIDEO_OUTPUT = 2, + V4L2_BUF_TYPE_VIDEO_OVERLAY = 3, + V4L2_BUF_TYPE_VBI_CAPTURE = 4, + V4L2_BUF_TYPE_VBI_OUTPUT = 5, + V4L2_BUF_TYPE_SLICED_VBI_CAPTURE = 6, + V4L2_BUF_TYPE_SLICED_VBI_OUTPUT = 7, + V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY = 8, + V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE = 9, + V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE = 10, + V4L2_BUF_TYPE_SDR_CAPTURE = 11, + V4L2_BUF_TYPE_SDR_OUTPUT = 12, + V4L2_BUF_TYPE_META_CAPTURE = 13, + V4L2_BUF_TYPE_META_OUTPUT = 14, + /* Deprecated, do not use */ + V4L2_BUF_TYPE_PRIVATE = 0x80, +}; + +#define V4L2_TYPE_IS_MULTIPLANAR(type) \ + ((type) == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE \ + || (type) == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + +#define V4L2_TYPE_IS_OUTPUT(type) \ + ((type) == V4L2_BUF_TYPE_VIDEO_OUTPUT \ + || (type) == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE \ + || (type) == V4L2_BUF_TYPE_VIDEO_OVERLAY \ + || (type) == V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY \ + || (type) == V4L2_BUF_TYPE_VBI_OUTPUT \ + || (type) == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT \ + || (type) == V4L2_BUF_TYPE_SDR_OUTPUT \ + || (type) == V4L2_BUF_TYPE_META_OUTPUT) + +#define V4L2_TYPE_IS_CAPTURE(type) (!V4L2_TYPE_IS_OUTPUT(type)) + +enum v4l2_tuner_type { + V4L2_TUNER_RADIO = 1, + V4L2_TUNER_ANALOG_TV = 2, + V4L2_TUNER_DIGITAL_TV = 3, + V4L2_TUNER_SDR = 4, + V4L2_TUNER_RF = 5, +}; + +/* Deprecated, do not use */ +#define V4L2_TUNER_ADC V4L2_TUNER_SDR + +enum v4l2_memory { + V4L2_MEMORY_MMAP = 1, + V4L2_MEMORY_USERPTR = 2, + V4L2_MEMORY_OVERLAY = 3, + V4L2_MEMORY_DMABUF = 4, +}; + +/* see also http://vektor.theorem.ca/graphics/ycbcr/ */ +enum v4l2_colorspace { + /* + * Default colorspace, i.e. let the driver figure it out. 
+ * Can only be used with video capture. + */ + V4L2_COLORSPACE_DEFAULT = 0, + + /* SMPTE 170M: used for broadcast NTSC/PAL SDTV */ + V4L2_COLORSPACE_SMPTE170M = 1, + + /* Obsolete pre-1998 SMPTE 240M HDTV standard, superseded by Rec 709 */ + V4L2_COLORSPACE_SMPTE240M = 2, + + /* Rec.709: used for HDTV */ + V4L2_COLORSPACE_REC709 = 3, + + /* + * Deprecated, do not use. No driver will ever return this. This was + * based on a misunderstanding of the bt878 datasheet. + */ + V4L2_COLORSPACE_BT878 = 4, + + /* + * NTSC 1953 colorspace. This only makes sense when dealing with + * really, really old NTSC recordings. Superseded by SMPTE 170M. + */ + V4L2_COLORSPACE_470_SYSTEM_M = 5, + + /* + * EBU Tech 3213 PAL/SECAM colorspace. + */ + V4L2_COLORSPACE_470_SYSTEM_BG = 6, + + /* + * Effectively shorthand for V4L2_COLORSPACE_SRGB, V4L2_YCBCR_ENC_601 + * and V4L2_QUANTIZATION_FULL_RANGE. To be used for (Motion-)JPEG. + */ + V4L2_COLORSPACE_JPEG = 7, + + /* For RGB colorspaces such as produces by most webcams. */ + V4L2_COLORSPACE_SRGB = 8, + + /* opRGB colorspace */ + V4L2_COLORSPACE_OPRGB = 9, + + /* BT.2020 colorspace, used for UHDTV. */ + V4L2_COLORSPACE_BT2020 = 10, + + /* Raw colorspace: for RAW unprocessed images */ + V4L2_COLORSPACE_RAW = 11, + + /* DCI-P3 colorspace, used by cinema projectors */ + V4L2_COLORSPACE_DCI_P3 = 12, + +}; + +/* + * Determine how COLORSPACE_DEFAULT should map to a proper colorspace. + * This depends on whether this is a SDTV image (use SMPTE 170M), an + * HDTV image (use Rec. 709), or something else (use sRGB). + */ +#define V4L2_MAP_COLORSPACE_DEFAULT(is_sdtv, is_hdtv) \ + ((is_sdtv) ? V4L2_COLORSPACE_SMPTE170M : \ + ((is_hdtv) ? V4L2_COLORSPACE_REC709 : V4L2_COLORSPACE_SRGB)) + +enum v4l2_xfer_func { + /* + * Mapping of V4L2_XFER_FUNC_DEFAULT to actual transfer functions + * for the various colorspaces: + * + * V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_470_SYSTEM_M, + * V4L2_COLORSPACE_470_SYSTEM_BG, V4L2_COLORSPACE_REC709 and + * V4L2_COLORSPACE_BT2020: V4L2_XFER_FUNC_709 + * + * V4L2_COLORSPACE_SRGB, V4L2_COLORSPACE_JPEG: V4L2_XFER_FUNC_SRGB + * + * V4L2_COLORSPACE_OPRGB: V4L2_XFER_FUNC_OPRGB + * + * V4L2_COLORSPACE_SMPTE240M: V4L2_XFER_FUNC_SMPTE240M + * + * V4L2_COLORSPACE_RAW: V4L2_XFER_FUNC_NONE + * + * V4L2_COLORSPACE_DCI_P3: V4L2_XFER_FUNC_DCI_P3 + */ + V4L2_XFER_FUNC_DEFAULT = 0, + V4L2_XFER_FUNC_709 = 1, + V4L2_XFER_FUNC_SRGB = 2, + V4L2_XFER_FUNC_OPRGB = 3, + V4L2_XFER_FUNC_SMPTE240M = 4, + V4L2_XFER_FUNC_NONE = 5, + V4L2_XFER_FUNC_DCI_P3 = 6, + V4L2_XFER_FUNC_SMPTE2084 = 7, +}; + +/* + * Determine how XFER_FUNC_DEFAULT should map to a proper transfer function. + * This depends on the colorspace. + */ +#define V4L2_MAP_XFER_FUNC_DEFAULT(colsp) \ + ((colsp) == V4L2_COLORSPACE_OPRGB ? V4L2_XFER_FUNC_OPRGB : \ + ((colsp) == V4L2_COLORSPACE_SMPTE240M ? V4L2_XFER_FUNC_SMPTE240M : \ + ((colsp) == V4L2_COLORSPACE_DCI_P3 ? V4L2_XFER_FUNC_DCI_P3 : \ + ((colsp) == V4L2_COLORSPACE_RAW ? V4L2_XFER_FUNC_NONE : \ + ((colsp) == V4L2_COLORSPACE_SRGB || (colsp) == V4L2_COLORSPACE_JPEG ? 
\ + V4L2_XFER_FUNC_SRGB : V4L2_XFER_FUNC_709))))) + +enum v4l2_ycbcr_encoding { + /* + * Mapping of V4L2_YCBCR_ENC_DEFAULT to actual encodings for the + * various colorspaces: + * + * V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_470_SYSTEM_M, + * V4L2_COLORSPACE_470_SYSTEM_BG, V4L2_COLORSPACE_SRGB, + * V4L2_COLORSPACE_OPRGB and V4L2_COLORSPACE_JPEG: V4L2_YCBCR_ENC_601 + * + * V4L2_COLORSPACE_REC709 and V4L2_COLORSPACE_DCI_P3: V4L2_YCBCR_ENC_709 + * + * V4L2_COLORSPACE_BT2020: V4L2_YCBCR_ENC_BT2020 + * + * V4L2_COLORSPACE_SMPTE240M: V4L2_YCBCR_ENC_SMPTE240M + */ + V4L2_YCBCR_ENC_DEFAULT = 0, + + /* ITU-R 601 -- SDTV */ + V4L2_YCBCR_ENC_601 = 1, + + /* Rec. 709 -- HDTV */ + V4L2_YCBCR_ENC_709 = 2, + + /* ITU-R 601/EN 61966-2-4 Extended Gamut -- SDTV */ + V4L2_YCBCR_ENC_XV601 = 3, + + /* Rec. 709/EN 61966-2-4 Extended Gamut -- HDTV */ + V4L2_YCBCR_ENC_XV709 = 4, + + /* + * sYCC (Y'CbCr encoding of sRGB), identical to ENC_601. It was added + * originally due to a misunderstanding of the sYCC standard. It should + * not be used, instead use V4L2_YCBCR_ENC_601. + */ + V4L2_YCBCR_ENC_SYCC = 5, + + /* BT.2020 Non-constant Luminance Y'CbCr */ + V4L2_YCBCR_ENC_BT2020 = 6, + + /* BT.2020 Constant Luminance Y'CbcCrc */ + V4L2_YCBCR_ENC_BT2020_CONST_LUM = 7, + + /* SMPTE 240M -- Obsolete HDTV */ + V4L2_YCBCR_ENC_SMPTE240M = 8, +}; + +/* + * enum v4l2_hsv_encoding values should not collide with the ones from + * enum v4l2_ycbcr_encoding. + */ +enum v4l2_hsv_encoding { + + /* Hue mapped to 0 - 179 */ + V4L2_HSV_ENC_180 = 128, + + /* Hue mapped to 0-255 */ + V4L2_HSV_ENC_256 = 129, +}; + +/* + * Determine how YCBCR_ENC_DEFAULT should map to a proper Y'CbCr encoding. + * This depends on the colorspace. + */ +#define V4L2_MAP_YCBCR_ENC_DEFAULT(colsp) \ + (((colsp) == V4L2_COLORSPACE_REC709 || \ + (colsp) == V4L2_COLORSPACE_DCI_P3) ? V4L2_YCBCR_ENC_709 : \ + ((colsp) == V4L2_COLORSPACE_BT2020 ? V4L2_YCBCR_ENC_BT2020 : \ + ((colsp) == V4L2_COLORSPACE_SMPTE240M ? V4L2_YCBCR_ENC_SMPTE240M : \ + V4L2_YCBCR_ENC_601))) + +enum v4l2_quantization { + /* + * The default for R'G'B' quantization is always full range. + * For Y'CbCr the quantization is always limited range, except + * for COLORSPACE_JPEG: this is full range. + */ + V4L2_QUANTIZATION_DEFAULT = 0, + V4L2_QUANTIZATION_FULL_RANGE = 1, + V4L2_QUANTIZATION_LIM_RANGE = 2, +}; + +/* + * Determine how QUANTIZATION_DEFAULT should map to a proper quantization. + * This depends on whether the image is RGB or not, the colorspace. + * The Y'CbCr encoding is not used anymore, but is still there for backwards + * compatibility. + */ +#define V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb_or_hsv, colsp, ycbcr_enc) \ + (((is_rgb_or_hsv) || (colsp) == V4L2_COLORSPACE_JPEG) ? \ + V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE) + +/* + * Deprecated names for opRGB colorspace (IEC 61966-2-5) + * + * WARNING: Please don't use these deprecated defines in your code, as + * there is a chance we have to remove them in the future. 
+ */ +#define V4L2_COLORSPACE_ADOBERGB V4L2_COLORSPACE_OPRGB +#define V4L2_XFER_FUNC_ADOBERGB V4L2_XFER_FUNC_OPRGB + +enum v4l2_priority { + V4L2_PRIORITY_UNSET = 0, /* not initialized */ + V4L2_PRIORITY_BACKGROUND = 1, + V4L2_PRIORITY_INTERACTIVE = 2, + V4L2_PRIORITY_RECORD = 3, + V4L2_PRIORITY_DEFAULT = V4L2_PRIORITY_INTERACTIVE, +}; + +struct v4l2_rect { + __s32 left; + __s32 top; + __u32 width; + __u32 height; +}; + +struct v4l2_fract { + __u32 numerator; + __u32 denominator; +}; + +struct v4l2_area { + __u32 width; + __u32 height; +}; + +/** + * struct v4l2_capability - Describes V4L2 device caps returned by VIDIOC_QUERYCAP + * + * @driver: name of the driver module (e.g. "bttv") + * @card: name of the card (e.g. "Hauppauge WinTV") + * @bus_info: name of the bus (e.g. "PCI:" + pci_name(pci_dev) ) + * @version: KERNEL_VERSION + * @capabilities: capabilities of the physical device as a whole + * @device_caps: capabilities accessed via this particular device (node) + * @reserved: reserved fields for future extensions + */ +struct v4l2_capability { + __u8 driver[16]; + __u8 card[32]; + __u8 bus_info[32]; + __u32 version; + __u32 capabilities; + __u32 device_caps; + __u32 reserved[3]; +}; + +/* Values for 'capabilities' field */ +#define V4L2_CAP_VIDEO_CAPTURE 0x00000001 /* Is a video capture device */ +#define V4L2_CAP_VIDEO_OUTPUT 0x00000002 /* Is a video output device */ +#define V4L2_CAP_VIDEO_OVERLAY 0x00000004 /* Can do video overlay */ +#define V4L2_CAP_VBI_CAPTURE 0x00000010 /* Is a raw VBI capture device */ +#define V4L2_CAP_VBI_OUTPUT 0x00000020 /* Is a raw VBI output device */ +#define V4L2_CAP_SLICED_VBI_CAPTURE 0x00000040 /* Is a sliced VBI capture device */ +#define V4L2_CAP_SLICED_VBI_OUTPUT 0x00000080 /* Is a sliced VBI output device */ +#define V4L2_CAP_RDS_CAPTURE 0x00000100 /* RDS data capture */ +#define V4L2_CAP_VIDEO_OUTPUT_OVERLAY 0x00000200 /* Can do video output overlay */ +#define V4L2_CAP_HW_FREQ_SEEK 0x00000400 /* Can do hardware frequency seek */ +#define V4L2_CAP_RDS_OUTPUT 0x00000800 /* Is an RDS encoder */ + +/* Is a video capture device that supports multiplanar formats */ +#define V4L2_CAP_VIDEO_CAPTURE_MPLANE 0x00001000 +/* Is a video output device that supports multiplanar formats */ +#define V4L2_CAP_VIDEO_OUTPUT_MPLANE 0x00002000 +/* Is a video mem-to-mem device that supports multiplanar formats */ +#define V4L2_CAP_VIDEO_M2M_MPLANE 0x00004000 +/* Is a video mem-to-mem device */ +#define V4L2_CAP_VIDEO_M2M 0x00008000 + +#define V4L2_CAP_TUNER 0x00010000 /* has a tuner */ +#define V4L2_CAP_AUDIO 0x00020000 /* has audio support */ +#define V4L2_CAP_RADIO 0x00040000 /* is a radio device */ +#define V4L2_CAP_MODULATOR 0x00080000 /* has a modulator */ + +#define V4L2_CAP_SDR_CAPTURE 0x00100000 /* Is a SDR capture device */ +#define V4L2_CAP_EXT_PIX_FORMAT 0x00200000 /* Supports the extended pixel format */ +#define V4L2_CAP_SDR_OUTPUT 0x00400000 /* Is a SDR output device */ +#define V4L2_CAP_META_CAPTURE 0x00800000 /* Is a metadata capture device */ + +#define V4L2_CAP_READWRITE 0x01000000 /* read/write systemcalls */ +#define V4L2_CAP_STREAMING 0x04000000 /* streaming I/O ioctls */ +#define V4L2_CAP_META_OUTPUT 0x08000000 /* Is a metadata output device */ + +#define V4L2_CAP_TOUCH 0x10000000 /* Is a touch device */ + +#define V4L2_CAP_IO_MC 0x20000000 /* Is input/output controlled by the media controller */ + +#define V4L2_CAP_DEVICE_CAPS 0x80000000 /* sets device capabilities field */ + +/* + * V I D E O I M A G E F O R M A T + */ +struct v4l2_pix_format 
{ + __u32 width; + __u32 height; + __u32 pixelformat; + __u32 field; /* enum v4l2_field */ + __u32 bytesperline; /* for padding, zero if unused */ + __u32 sizeimage; + __u32 colorspace; /* enum v4l2_colorspace */ + __u32 priv; /* private data, depends on pixelformat */ + __u32 flags; /* format flags (V4L2_PIX_FMT_FLAG_*) */ + union { + /* enum v4l2_ycbcr_encoding */ + __u32 ycbcr_enc; + /* enum v4l2_hsv_encoding */ + __u32 hsv_enc; + }; + __u32 quantization; /* enum v4l2_quantization */ + __u32 xfer_func; /* enum v4l2_xfer_func */ +}; + +/* Pixel format FOURCC depth Description */ + +/* RGB formats (1 or 2 bytes per pixel) */ +#define V4L2_PIX_FMT_RGB332 v4l2_fourcc('R', 'G', 'B', '1') /* 8 RGB-3-3-2 */ +#define V4L2_PIX_FMT_RGB444 v4l2_fourcc('R', '4', '4', '4') /* 16 xxxxrrrr ggggbbbb */ +#define V4L2_PIX_FMT_ARGB444 v4l2_fourcc('A', 'R', '1', '2') /* 16 aaaarrrr ggggbbbb */ +#define V4L2_PIX_FMT_XRGB444 v4l2_fourcc('X', 'R', '1', '2') /* 16 xxxxrrrr ggggbbbb */ +#define V4L2_PIX_FMT_RGBA444 v4l2_fourcc('R', 'A', '1', '2') /* 16 rrrrgggg bbbbaaaa */ +#define V4L2_PIX_FMT_RGBX444 v4l2_fourcc('R', 'X', '1', '2') /* 16 rrrrgggg bbbbxxxx */ +#define V4L2_PIX_FMT_ABGR444 v4l2_fourcc('A', 'B', '1', '2') /* 16 aaaabbbb ggggrrrr */ +#define V4L2_PIX_FMT_XBGR444 v4l2_fourcc('X', 'B', '1', '2') /* 16 xxxxbbbb ggggrrrr */ +#define V4L2_PIX_FMT_BGRA444 v4l2_fourcc('G', 'A', '1', '2') /* 16 bbbbgggg rrrraaaa */ +#define V4L2_PIX_FMT_BGRX444 v4l2_fourcc('B', 'X', '1', '2') /* 16 bbbbgggg rrrrxxxx */ +#define V4L2_PIX_FMT_RGB555 v4l2_fourcc('R', 'G', 'B', 'O') /* 16 RGB-5-5-5 */ +#define V4L2_PIX_FMT_ARGB555 v4l2_fourcc('A', 'R', '1', '5') /* 16 ARGB-1-5-5-5 */ +#define V4L2_PIX_FMT_XRGB555 v4l2_fourcc('X', 'R', '1', '5') /* 16 XRGB-1-5-5-5 */ +#define V4L2_PIX_FMT_RGBA555 v4l2_fourcc('R', 'A', '1', '5') /* 16 RGBA-5-5-5-1 */ +#define V4L2_PIX_FMT_RGBX555 v4l2_fourcc('R', 'X', '1', '5') /* 16 RGBX-5-5-5-1 */ +#define V4L2_PIX_FMT_ABGR555 v4l2_fourcc('A', 'B', '1', '5') /* 16 ABGR-1-5-5-5 */ +#define V4L2_PIX_FMT_XBGR555 v4l2_fourcc('X', 'B', '1', '5') /* 16 XBGR-1-5-5-5 */ +#define V4L2_PIX_FMT_BGRA555 v4l2_fourcc('B', 'A', '1', '5') /* 16 BGRA-5-5-5-1 */ +#define V4L2_PIX_FMT_BGRX555 v4l2_fourcc('B', 'X', '1', '5') /* 16 BGRX-5-5-5-1 */ +#define V4L2_PIX_FMT_RGB565 v4l2_fourcc('R', 'G', 'B', 'P') /* 16 RGB-5-6-5 */ +#define V4L2_PIX_FMT_RGB555X v4l2_fourcc('R', 'G', 'B', 'Q') /* 16 RGB-5-5-5 BE */ +#define V4L2_PIX_FMT_ARGB555X v4l2_fourcc_be('A', 'R', '1', '5') /* 16 ARGB-5-5-5 BE */ +#define V4L2_PIX_FMT_XRGB555X v4l2_fourcc_be('X', 'R', '1', '5') /* 16 XRGB-5-5-5 BE */ +#define V4L2_PIX_FMT_RGB565X v4l2_fourcc('R', 'G', 'B', 'R') /* 16 RGB-5-6-5 BE */ + +/* RGB formats (3 or 4 bytes per pixel) */ +#define V4L2_PIX_FMT_BGR666 v4l2_fourcc('B', 'G', 'R', 'H') /* 18 BGR-6-6-6 */ +#define V4L2_PIX_FMT_BGR24 v4l2_fourcc('B', 'G', 'R', '3') /* 24 BGR-8-8-8 */ +#define V4L2_PIX_FMT_RGB24 v4l2_fourcc('R', 'G', 'B', '3') /* 24 RGB-8-8-8 */ +#define V4L2_PIX_FMT_BGR32 v4l2_fourcc('B', 'G', 'R', '4') /* 32 BGR-8-8-8-8 */ +#define V4L2_PIX_FMT_ABGR32 v4l2_fourcc('A', 'R', '2', '4') /* 32 BGRA-8-8-8-8 */ +#define V4L2_PIX_FMT_XBGR32 v4l2_fourcc('X', 'R', '2', '4') /* 32 BGRX-8-8-8-8 */ +#define V4L2_PIX_FMT_BGRA32 v4l2_fourcc('R', 'A', '2', '4') /* 32 ABGR-8-8-8-8 */ +#define V4L2_PIX_FMT_BGRX32 v4l2_fourcc('R', 'X', '2', '4') /* 32 XBGR-8-8-8-8 */ +#define V4L2_PIX_FMT_RGB32 v4l2_fourcc('R', 'G', 'B', '4') /* 32 RGB-8-8-8-8 */ +#define V4L2_PIX_FMT_RGBA32 v4l2_fourcc('A', 'B', '2', '4') /* 32 RGBA-8-8-8-8 */ 
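+
+/*
+ * Illustration (not part of the upstream header): v4l2_fourcc() packs the
+ * four characters little-endian, one per byte, so a pixelformat value can
+ * be unpacked for debug output along these lines:
+ *
+ *	void print_fourcc(__u32 fmt)
+ *	{
+ *		printf("%c%c%c%c\n", fmt & 0x7f, (fmt >> 8) & 0x7f,
+ *		       (fmt >> 16) & 0x7f, (fmt >> 24) & 0x7f);
+ *	}
+ *
+ * e.g. V4L2_PIX_FMT_RGBA32 prints "AB24". Masking with 0x7f drops bit 31,
+ * which v4l2_fourcc_be() sets to mark big-endian variants.
+ */
+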
+#define V4L2_PIX_FMT_RGBX32 v4l2_fourcc('X', 'B', '2', '4') /* 32 RGBX-8-8-8-8 */ +#define V4L2_PIX_FMT_ARGB32 v4l2_fourcc('B', 'A', '2', '4') /* 32 ARGB-8-8-8-8 */ +#define V4L2_PIX_FMT_XRGB32 v4l2_fourcc('B', 'X', '2', '4') /* 32 XRGB-8-8-8-8 */ +#define V4L2_PIX_FMT_RGBX1010102 v4l2_fourcc('R', 'X', '3', '0') /* 32 RGBX-10-10-10-2 */ +#define V4L2_PIX_FMT_RGBA1010102 v4l2_fourcc('R', 'A', '3', '0') /* 32 RGBA-10-10-10-2 */ +#define V4L2_PIX_FMT_ARGB2101010 v4l2_fourcc('A', 'R', '3', '0') /* 32 ARGB-2-10-10-10 */ + +/* RGB formats (6 or 8 bytes per pixel) */ +#define V4L2_PIX_FMT_BGR48_12 v4l2_fourcc('B', '3', '1', '2') /* 48 BGR 12-bit per component */ +#define V4L2_PIX_FMT_BGR48 v4l2_fourcc('B', 'G', 'R', '6') /* 48 BGR 16-bit per component */ +#define V4L2_PIX_FMT_RGB48 v4l2_fourcc('R', 'G', 'B', '6') /* 48 RGB 16-bit per component */ +#define V4L2_PIX_FMT_ABGR64_12 v4l2_fourcc('B', '4', '1', '2') /* 64 BGRA 12-bit per component */ + +/* Grey formats */ +#define V4L2_PIX_FMT_GREY v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */ +#define V4L2_PIX_FMT_Y4 v4l2_fourcc('Y', '0', '4', ' ') /* 4 Greyscale */ +#define V4L2_PIX_FMT_Y6 v4l2_fourcc('Y', '0', '6', ' ') /* 6 Greyscale */ +#define V4L2_PIX_FMT_Y10 v4l2_fourcc('Y', '1', '0', ' ') /* 10 Greyscale */ +#define V4L2_PIX_FMT_Y12 v4l2_fourcc('Y', '1', '2', ' ') /* 12 Greyscale */ +#define V4L2_PIX_FMT_Y012 v4l2_fourcc('Y', '0', '1', '2') /* 12 Greyscale */ +#define V4L2_PIX_FMT_Y14 v4l2_fourcc('Y', '1', '4', ' ') /* 14 Greyscale */ +#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */ +#define V4L2_PIX_FMT_Y16_BE v4l2_fourcc_be('Y', '1', '6', ' ') /* 16 Greyscale BE */ + +/* Grey bit-packed formats */ +#define V4L2_PIX_FMT_Y10BPACK v4l2_fourcc('Y', '1', '0', 'B') /* 10 Greyscale bit-packed */ +#define V4L2_PIX_FMT_Y10P v4l2_fourcc('Y', '1', '0', 'P') /* 10 Greyscale, MIPI RAW10 packed */ +#define V4L2_PIX_FMT_IPU3_Y10 v4l2_fourcc('i', 'p', '3', 'y') /* IPU3 packed 10-bit greyscale */ +#define V4L2_PIX_FMT_Y12P v4l2_fourcc('Y', '1', '2', 'P') /* 12 Greyscale, MIPI RAW12 packed */ +#define V4L2_PIX_FMT_Y14P v4l2_fourcc('Y', '1', '4', 'P') /* 14 Greyscale, MIPI RAW14 packed */ + +/* Palette formats */ +#define V4L2_PIX_FMT_PAL8 v4l2_fourcc('P', 'A', 'L', '8') /* 8 8-bit palette */ + +/* Chrominance formats */ +#define V4L2_PIX_FMT_UV8 v4l2_fourcc('U', 'V', '8', ' ') /* 8 UV 4:4 */ + +/* Luminance+Chrominance formats */ +#define V4L2_PIX_FMT_YUYV v4l2_fourcc('Y', 'U', 'Y', 'V') /* 16 YUV 4:2:2 */ +#define V4L2_PIX_FMT_YYUV v4l2_fourcc('Y', 'Y', 'U', 'V') /* 16 YUV 4:2:2 */ +#define V4L2_PIX_FMT_YVYU v4l2_fourcc('Y', 'V', 'Y', 'U') /* 16 YVU 4:2:2 */ +#define V4L2_PIX_FMT_UYVY v4l2_fourcc('U', 'Y', 'V', 'Y') /* 16 YUV 4:2:2 */ +#define V4L2_PIX_FMT_VYUY v4l2_fourcc('V', 'Y', 'U', 'Y') /* 16 YUV 4:2:2 */ +#define V4L2_PIX_FMT_Y41P v4l2_fourcc('Y', '4', '1', 'P') /* 12 YUV 4:1:1 */ +#define V4L2_PIX_FMT_YUV444 v4l2_fourcc('Y', '4', '4', '4') /* 16 xxxxyyyy uuuuvvvv */ +#define V4L2_PIX_FMT_YUV555 v4l2_fourcc('Y', 'U', 'V', 'O') /* 16 YUV-5-5-5 */ +#define V4L2_PIX_FMT_YUV565 v4l2_fourcc('Y', 'U', 'V', 'P') /* 16 YUV-5-6-5 */ +#define V4L2_PIX_FMT_YUV24 v4l2_fourcc('Y', 'U', 'V', '3') /* 24 YUV-8-8-8 */ +#define V4L2_PIX_FMT_YUV32 v4l2_fourcc('Y', 'U', 'V', '4') /* 32 YUV-8-8-8-8 */ +#define V4L2_PIX_FMT_AYUV32 v4l2_fourcc('A', 'Y', 'U', 'V') /* 32 AYUV-8-8-8-8 */ +#define V4L2_PIX_FMT_XYUV32 v4l2_fourcc('X', 'Y', 'U', 'V') /* 32 XYUV-8-8-8-8 */ +#define V4L2_PIX_FMT_VUYA32 v4l2_fourcc('V', 'U', 'Y', 'A') /* 32 
VUYA-8-8-8-8 */ +#define V4L2_PIX_FMT_VUYX32 v4l2_fourcc('V', 'U', 'Y', 'X') /* 32 VUYX-8-8-8-8 */ +#define V4L2_PIX_FMT_YUVA32 v4l2_fourcc('Y', 'U', 'V', 'A') /* 32 YUVA-8-8-8-8 */ +#define V4L2_PIX_FMT_YUVX32 v4l2_fourcc('Y', 'U', 'V', 'X') /* 32 YUVX-8-8-8-8 */ +#define V4L2_PIX_FMT_M420 v4l2_fourcc('M', '4', '2', '0') /* 12 YUV 4:2:0 2 lines y, 1 line uv interleaved */ +#define V4L2_PIX_FMT_YUV48_12 v4l2_fourcc('Y', '3', '1', '2') /* 48 YUV 4:4:4 12-bit per component */ + +/* + * YCbCr packed format. For each Y2xx format, xx bits of valid data occupy the MSBs + * of the 16 bit components, and 16-xx bits of zero padding occupy the LSBs. + */ +#define V4L2_PIX_FMT_Y210 v4l2_fourcc('Y', '2', '1', '0') /* 32 YUYV 4:2:2 */ +#define V4L2_PIX_FMT_Y212 v4l2_fourcc('Y', '2', '1', '2') /* 32 YUYV 4:2:2 */ +#define V4L2_PIX_FMT_Y216 v4l2_fourcc('Y', '2', '1', '6') /* 32 YUYV 4:2:2 */ + +/* two planes -- one Y, one Cr + Cb interleaved */ +#define V4L2_PIX_FMT_NV12 v4l2_fourcc('N', 'V', '1', '2') /* 12 Y/CbCr 4:2:0 */ +#define V4L2_PIX_FMT_NV21 v4l2_fourcc('N', 'V', '2', '1') /* 12 Y/CrCb 4:2:0 */ +#define V4L2_PIX_FMT_NV16 v4l2_fourcc('N', 'V', '1', '6') /* 16 Y/CbCr 4:2:2 */ +#define V4L2_PIX_FMT_NV61 v4l2_fourcc('N', 'V', '6', '1') /* 16 Y/CrCb 4:2:2 */ +#define V4L2_PIX_FMT_NV24 v4l2_fourcc('N', 'V', '2', '4') /* 24 Y/CbCr 4:4:4 */ +#define V4L2_PIX_FMT_NV42 v4l2_fourcc('N', 'V', '4', '2') /* 24 Y/CrCb 4:4:4 */ +#define V4L2_PIX_FMT_P010 v4l2_fourcc('P', '0', '1', '0') /* 24 Y/CbCr 4:2:0 10-bit per component */ +#define V4L2_PIX_FMT_P012 v4l2_fourcc('P', '0', '1', '2') /* 24 Y/CbCr 4:2:0 12-bit per component */ + +/* two non contiguous planes - one Y, one Cr + Cb interleaved */ +#define V4L2_PIX_FMT_NV12M v4l2_fourcc('N', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 */ +#define V4L2_PIX_FMT_NV21M v4l2_fourcc('N', 'M', '2', '1') /* 21 Y/CrCb 4:2:0 */ +#define V4L2_PIX_FMT_NV16M v4l2_fourcc('N', 'M', '1', '6') /* 16 Y/CbCr 4:2:2 */ +#define V4L2_PIX_FMT_NV61M v4l2_fourcc('N', 'M', '6', '1') /* 16 Y/CrCb 4:2:2 */ +#define V4L2_PIX_FMT_P012M v4l2_fourcc('P', 'M', '1', '2') /* 24 Y/CbCr 4:2:0 12-bit per component */ + +/* three planes - Y Cb, Cr */ +#define V4L2_PIX_FMT_YUV410 v4l2_fourcc('Y', 'U', 'V', '9') /* 9 YUV 4:1:0 */ +#define V4L2_PIX_FMT_YVU410 v4l2_fourcc('Y', 'V', 'U', '9') /* 9 YVU 4:1:0 */ +#define V4L2_PIX_FMT_YUV411P v4l2_fourcc('4', '1', '1', 'P') /* 12 YVU411 planar */ +#define V4L2_PIX_FMT_YUV420 v4l2_fourcc('Y', 'U', '1', '2') /* 12 YUV 4:2:0 */ +#define V4L2_PIX_FMT_YVU420 v4l2_fourcc('Y', 'V', '1', '2') /* 12 YVU 4:2:0 */ +#define V4L2_PIX_FMT_YUV422P v4l2_fourcc('4', '2', '2', 'P') /* 16 YVU422 planar */ + +/* three non contiguous planes - Y, Cb, Cr */ +#define V4L2_PIX_FMT_YUV420M v4l2_fourcc('Y', 'M', '1', '2') /* 12 YUV420 planar */ +#define V4L2_PIX_FMT_YVU420M v4l2_fourcc('Y', 'M', '2', '1') /* 12 YVU420 planar */ +#define V4L2_PIX_FMT_YUV422M v4l2_fourcc('Y', 'M', '1', '6') /* 16 YUV422 planar */ +#define V4L2_PIX_FMT_YVU422M v4l2_fourcc('Y', 'M', '6', '1') /* 16 YVU422 planar */ +#define V4L2_PIX_FMT_YUV444M v4l2_fourcc('Y', 'M', '2', '4') /* 24 YUV444 planar */ +#define V4L2_PIX_FMT_YVU444M v4l2_fourcc('Y', 'M', '4', '2') /* 24 YVU444 planar */ + +/* Tiled YUV formats */ +#define V4L2_PIX_FMT_NV12_4L4 v4l2_fourcc('V', 'T', '1', '2') /* 12 Y/CbCr 4:2:0 4x4 tiles */ +#define V4L2_PIX_FMT_NV12_16L16 v4l2_fourcc('H', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 16x16 tiles */ +#define V4L2_PIX_FMT_NV12_32L32 v4l2_fourcc('S', 'T', '1', '2') /* 12 Y/CbCr 4:2:0 32x32 tiles */ +#define 
V4L2_PIX_FMT_NV15_4L4 v4l2_fourcc('V', 'T', '1', '5') /* 15 Y/CbCr 4:2:0 10-bit 4x4 tiles */ +#define V4L2_PIX_FMT_P010_4L4 v4l2_fourcc('T', '0', '1', '0') /* 12 Y/CbCr 4:2:0 10-bit 4x4 macroblocks */ +#define V4L2_PIX_FMT_NV12_8L128 v4l2_fourcc('A', 'T', '1', '2') /* Y/CbCr 4:2:0 8x128 tiles */ +#define V4L2_PIX_FMT_NV12_10BE_8L128 v4l2_fourcc_be('A', 'X', '1', '2') /* Y/CbCr 4:2:0 10-bit 8x128 tiles */ + +/* Tiled YUV formats, non contiguous planes */ +#define V4L2_PIX_FMT_NV12MT v4l2_fourcc('T', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 64x32 tiles */ +#define V4L2_PIX_FMT_NV12MT_16X16 v4l2_fourcc('V', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 16x16 tiles */ +#define V4L2_PIX_FMT_NV12M_8L128 v4l2_fourcc('N', 'A', '1', '2') /* Y/CbCr 4:2:0 8x128 tiles */ +#define V4L2_PIX_FMT_NV12M_10BE_8L128 v4l2_fourcc_be('N', 'T', '1', '2') /* Y/CbCr 4:2:0 10-bit 8x128 tiles */ + +/* Bayer formats - see http://www.siliconimaging.com/RGB%20Bayer.htm */ +#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */ +#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */ +#define V4L2_PIX_FMT_SGRBG8 v4l2_fourcc('G', 'R', 'B', 'G') /* 8 GRGR.. BGBG.. */ +#define V4L2_PIX_FMT_SRGGB8 v4l2_fourcc('R', 'G', 'G', 'B') /* 8 RGRG.. GBGB.. */ +#define V4L2_PIX_FMT_SBGGR10 v4l2_fourcc('B', 'G', '1', '0') /* 10 BGBG.. GRGR.. */ +#define V4L2_PIX_FMT_SGBRG10 v4l2_fourcc('G', 'B', '1', '0') /* 10 GBGB.. RGRG.. */ +#define V4L2_PIX_FMT_SGRBG10 v4l2_fourcc('B', 'A', '1', '0') /* 10 GRGR.. BGBG.. */ +#define V4L2_PIX_FMT_SRGGB10 v4l2_fourcc('R', 'G', '1', '0') /* 10 RGRG.. GBGB.. */ + /* 10bit raw bayer packed, 5 bytes for every 4 pixels */ +#define V4L2_PIX_FMT_SBGGR10P v4l2_fourcc('p', 'B', 'A', 'A') +#define V4L2_PIX_FMT_SGBRG10P v4l2_fourcc('p', 'G', 'A', 'A') +#define V4L2_PIX_FMT_SGRBG10P v4l2_fourcc('p', 'g', 'A', 'A') +#define V4L2_PIX_FMT_SRGGB10P v4l2_fourcc('p', 'R', 'A', 'A') + /* 10bit raw bayer a-law compressed to 8 bits */ +#define V4L2_PIX_FMT_SBGGR10ALAW8 v4l2_fourcc('a', 'B', 'A', '8') +#define V4L2_PIX_FMT_SGBRG10ALAW8 v4l2_fourcc('a', 'G', 'A', '8') +#define V4L2_PIX_FMT_SGRBG10ALAW8 v4l2_fourcc('a', 'g', 'A', '8') +#define V4L2_PIX_FMT_SRGGB10ALAW8 v4l2_fourcc('a', 'R', 'A', '8') + /* 10bit raw bayer DPCM compressed to 8 bits */ +#define V4L2_PIX_FMT_SBGGR10DPCM8 v4l2_fourcc('b', 'B', 'A', '8') +#define V4L2_PIX_FMT_SGBRG10DPCM8 v4l2_fourcc('b', 'G', 'A', '8') +#define V4L2_PIX_FMT_SGRBG10DPCM8 v4l2_fourcc('B', 'D', '1', '0') +#define V4L2_PIX_FMT_SRGGB10DPCM8 v4l2_fourcc('b', 'R', 'A', '8') +#define V4L2_PIX_FMT_SBGGR12 v4l2_fourcc('B', 'G', '1', '2') /* 12 BGBG.. GRGR.. */ +#define V4L2_PIX_FMT_SGBRG12 v4l2_fourcc('G', 'B', '1', '2') /* 12 GBGB.. RGRG.. */ +#define V4L2_PIX_FMT_SGRBG12 v4l2_fourcc('B', 'A', '1', '2') /* 12 GRGR.. BGBG.. */ +#define V4L2_PIX_FMT_SRGGB12 v4l2_fourcc('R', 'G', '1', '2') /* 12 RGRG.. GBGB.. */ + /* 12bit raw bayer packed, 6 bytes for every 4 pixels */ +#define V4L2_PIX_FMT_SBGGR12P v4l2_fourcc('p', 'B', 'C', 'C') +#define V4L2_PIX_FMT_SGBRG12P v4l2_fourcc('p', 'G', 'C', 'C') +#define V4L2_PIX_FMT_SGRBG12P v4l2_fourcc('p', 'g', 'C', 'C') +#define V4L2_PIX_FMT_SRGGB12P v4l2_fourcc('p', 'R', 'C', 'C') +#define V4L2_PIX_FMT_SBGGR14 v4l2_fourcc('B', 'G', '1', '4') /* 14 BGBG.. GRGR.. */ +#define V4L2_PIX_FMT_SGBRG14 v4l2_fourcc('G', 'B', '1', '4') /* 14 GBGB.. RGRG.. */ +#define V4L2_PIX_FMT_SGRBG14 v4l2_fourcc('G', 'R', '1', '4') /* 14 GRGR.. BGBG.. */ +#define V4L2_PIX_FMT_SRGGB14 v4l2_fourcc('R', 'G', '1', '4') /* 14 RGRG.. GBGB.. 
*/ + /* 14bit raw bayer packed, 7 bytes for every 4 pixels */ +#define V4L2_PIX_FMT_SBGGR14P v4l2_fourcc('p', 'B', 'E', 'E') +#define V4L2_PIX_FMT_SGBRG14P v4l2_fourcc('p', 'G', 'E', 'E') +#define V4L2_PIX_FMT_SGRBG14P v4l2_fourcc('p', 'g', 'E', 'E') +#define V4L2_PIX_FMT_SRGGB14P v4l2_fourcc('p', 'R', 'E', 'E') +#define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B', 'Y', 'R', '2') /* 16 BGBG.. GRGR.. */ +#define V4L2_PIX_FMT_SGBRG16 v4l2_fourcc('G', 'B', '1', '6') /* 16 GBGB.. RGRG.. */ +#define V4L2_PIX_FMT_SGRBG16 v4l2_fourcc('G', 'R', '1', '6') /* 16 GRGR.. BGBG.. */ +#define V4L2_PIX_FMT_SRGGB16 v4l2_fourcc('R', 'G', '1', '6') /* 16 RGRG.. GBGB.. */ + +/* HSV formats */ +#define V4L2_PIX_FMT_HSV24 v4l2_fourcc('H', 'S', 'V', '3') +#define V4L2_PIX_FMT_HSV32 v4l2_fourcc('H', 'S', 'V', '4') + +/* compressed formats */ +#define V4L2_PIX_FMT_MJPEG v4l2_fourcc('M', 'J', 'P', 'G') /* Motion-JPEG */ +#define V4L2_PIX_FMT_JPEG v4l2_fourcc('J', 'P', 'E', 'G') /* JFIF JPEG */ +#define V4L2_PIX_FMT_DV v4l2_fourcc('d', 'v', 's', 'd') /* 1394 */ +#define V4L2_PIX_FMT_MPEG v4l2_fourcc('M', 'P', 'E', 'G') /* MPEG-1/2/4 Multiplexed */ +#define V4L2_PIX_FMT_H264 v4l2_fourcc('H', '2', '6', '4') /* H264 with start codes */ +#define V4L2_PIX_FMT_H264_NO_SC v4l2_fourcc('A', 'V', 'C', '1') /* H264 without start codes */ +#define V4L2_PIX_FMT_H264_MVC v4l2_fourcc('M', '2', '6', '4') /* H264 MVC */ +#define V4L2_PIX_FMT_H263 v4l2_fourcc('H', '2', '6', '3') /* H263 */ +#define V4L2_PIX_FMT_MPEG1 v4l2_fourcc('M', 'P', 'G', '1') /* MPEG-1 ES */ +#define V4L2_PIX_FMT_MPEG2 v4l2_fourcc('M', 'P', 'G', '2') /* MPEG-2 ES */ +#define V4L2_PIX_FMT_MPEG2_SLICE v4l2_fourcc('M', 'G', '2', 'S') /* MPEG-2 parsed slice data */ +#define V4L2_PIX_FMT_MPEG4 v4l2_fourcc('M', 'P', 'G', '4') /* MPEG-4 part 2 ES */ +#define V4L2_PIX_FMT_XVID v4l2_fourcc('X', 'V', 'I', 'D') /* Xvid */ +#define V4L2_PIX_FMT_VC1_ANNEX_G v4l2_fourcc('V', 'C', '1', 'G') /* SMPTE 421M Annex G compliant stream */ +#define V4L2_PIX_FMT_VC1_ANNEX_L v4l2_fourcc('V', 'C', '1', 'L') /* SMPTE 421M Annex L compliant stream */ +#define V4L2_PIX_FMT_VP8 v4l2_fourcc('V', 'P', '8', '0') /* VP8 */ +#define V4L2_PIX_FMT_VP8_FRAME v4l2_fourcc('V', 'P', '8', 'F') /* VP8 parsed frame */ +#define V4L2_PIX_FMT_VP9 v4l2_fourcc('V', 'P', '9', '0') /* VP9 */ +#define V4L2_PIX_FMT_VP9_FRAME v4l2_fourcc('V', 'P', '9', 'F') /* VP9 parsed frame */ +#define V4L2_PIX_FMT_HEVC v4l2_fourcc('H', 'E', 'V', 'C') /* HEVC aka H.265 */ +#define V4L2_PIX_FMT_FWHT v4l2_fourcc('F', 'W', 'H', 'T') /* Fast Walsh Hadamard Transform (vicodec) */ +#define V4L2_PIX_FMT_FWHT_STATELESS v4l2_fourcc('S', 'F', 'W', 'H') /* Stateless FWHT (vicodec) */ +#define V4L2_PIX_FMT_H264_SLICE v4l2_fourcc('S', '2', '6', '4') /* H264 parsed slices */ +#define V4L2_PIX_FMT_HEVC_SLICE v4l2_fourcc('S', '2', '6', '5') /* HEVC parsed slices */ +#define V4L2_PIX_FMT_AV1_FRAME v4l2_fourcc('A', 'V', '1', 'F') /* AV1 parsed frame */ +#define V4L2_PIX_FMT_SPK v4l2_fourcc('S', 'P', 'K', '0') /* Sorenson Spark */ +#define V4L2_PIX_FMT_RV30 v4l2_fourcc('R', 'V', '3', '0') /* RealVideo 8 */ +#define V4L2_PIX_FMT_RV40 v4l2_fourcc('R', 'V', '4', '0') /* RealVideo 9 & 10 */ + +/* Vendor-specific formats */ +#define V4L2_PIX_FMT_CPIA1 v4l2_fourcc('C', 'P', 'I', 'A') /* cpia1 YUV */ +#define V4L2_PIX_FMT_WNVA v4l2_fourcc('W', 'N', 'V', 'A') /* Winnov hw compress */ +#define V4L2_PIX_FMT_SN9C10X v4l2_fourcc('S', '9', '1', '0') /* SN9C10x compression */ +#define V4L2_PIX_FMT_SN9C20X_I420 v4l2_fourcc('S', '9', '2', '0') /* SN9C20x YUV 4:2:0 */ 
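+
+/*
+ * Illustration (not upstream text): which of the formats above a device
+ * actually implements is discovered at runtime with VIDIOC_ENUM_FMT,
+ * declared further down in this header, e.g.:
+ *
+ *	struct v4l2_fmtdesc desc = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
+ *
+ *	while (ioctl(fd, VIDIOC_ENUM_FMT, &desc) == 0) {
+ *		printf("%u: %s\n", desc.index, desc.description);
+ *		desc.index++;
+ *	}
+ *
+ * The ioctl fails with EINVAL once desc.index passes the last format.
+ */
+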
+#define V4L2_PIX_FMT_PWC1 v4l2_fourcc('P', 'W', 'C', '1') /* pwc older webcam */ +#define V4L2_PIX_FMT_PWC2 v4l2_fourcc('P', 'W', 'C', '2') /* pwc newer webcam */ +#define V4L2_PIX_FMT_ET61X251 v4l2_fourcc('E', '6', '2', '5') /* ET61X251 compression */ +#define V4L2_PIX_FMT_SPCA501 v4l2_fourcc('S', '5', '0', '1') /* YUYV per line */ +#define V4L2_PIX_FMT_SPCA505 v4l2_fourcc('S', '5', '0', '5') /* YYUV per line */ +#define V4L2_PIX_FMT_SPCA508 v4l2_fourcc('S', '5', '0', '8') /* YUVY per line */ +#define V4L2_PIX_FMT_SPCA561 v4l2_fourcc('S', '5', '6', '1') /* compressed GBRG bayer */ +#define V4L2_PIX_FMT_PAC207 v4l2_fourcc('P', '2', '0', '7') /* compressed BGGR bayer */ +#define V4L2_PIX_FMT_MR97310A v4l2_fourcc('M', '3', '1', '0') /* compressed BGGR bayer */ +#define V4L2_PIX_FMT_JL2005BCD v4l2_fourcc('J', 'L', '2', '0') /* compressed RGGB bayer */ +#define V4L2_PIX_FMT_SN9C2028 v4l2_fourcc('S', 'O', 'N', 'X') /* compressed GBRG bayer */ +#define V4L2_PIX_FMT_SQ905C v4l2_fourcc('9', '0', '5', 'C') /* compressed RGGB bayer */ +#define V4L2_PIX_FMT_PJPG v4l2_fourcc('P', 'J', 'P', 'G') /* Pixart 73xx JPEG */ +#define V4L2_PIX_FMT_OV511 v4l2_fourcc('O', '5', '1', '1') /* ov511 JPEG */ +#define V4L2_PIX_FMT_OV518 v4l2_fourcc('O', '5', '1', '8') /* ov518 JPEG */ +#define V4L2_PIX_FMT_STV0680 v4l2_fourcc('S', '6', '8', '0') /* stv0680 bayer */ +#define V4L2_PIX_FMT_TM6000 v4l2_fourcc('T', 'M', '6', '0') /* tm5600/tm60x0 */ +#define V4L2_PIX_FMT_CIT_YYVYUY v4l2_fourcc('C', 'I', 'T', 'V') /* one line of Y then 1 line of VYUY */ +#define V4L2_PIX_FMT_KONICA420 v4l2_fourcc('K', 'O', 'N', 'I') /* YUV420 planar in blocks of 256 pixels */ +#define V4L2_PIX_FMT_JPGL v4l2_fourcc('J', 'P', 'G', 'L') /* JPEG-Lite */ +#define V4L2_PIX_FMT_SE401 v4l2_fourcc('S', '4', '0', '1') /* se401 janggu compressed rgb */ +#define V4L2_PIX_FMT_S5C_UYVY_JPG v4l2_fourcc('S', '5', 'C', 'I') /* S5C73M3 interleaved UYVY/JPEG */ +#define V4L2_PIX_FMT_Y8I v4l2_fourcc('Y', '8', 'I', ' ') /* Greyscale 8-bit L/R interleaved */ +#define V4L2_PIX_FMT_Y12I v4l2_fourcc('Y', '1', '2', 'I') /* Greyscale 12-bit L/R interleaved */ +#define V4L2_PIX_FMT_Z16 v4l2_fourcc('Z', '1', '6', ' ') /* Depth data 16-bit */ +#define V4L2_PIX_FMT_MT21C v4l2_fourcc('M', 'T', '2', '1') /* Mediatek compressed block mode */ +#define V4L2_PIX_FMT_MM21 v4l2_fourcc('M', 'M', '2', '1') /* Mediatek 8-bit block mode, two non-contiguous planes */ +#define V4L2_PIX_FMT_MT2110T v4l2_fourcc('M', 'T', '2', 'T') /* Mediatek 10-bit block tile mode */ +#define V4L2_PIX_FMT_MT2110R v4l2_fourcc('M', 'T', '2', 'R') /* Mediatek 10-bit block raster mode */ +#define V4L2_PIX_FMT_INZI v4l2_fourcc('I', 'N', 'Z', 'I') /* Intel Planar Greyscale 10-bit and Depth 16-bit */ +#define V4L2_PIX_FMT_CNF4 v4l2_fourcc('C', 'N', 'F', '4') /* Intel 4-bit packed depth confidence information */ +#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H', 'I', '2', '4') /* BTTV 8-bit dithered RGB */ +#define V4L2_PIX_FMT_QC08C v4l2_fourcc('Q', '0', '8', 'C') /* Qualcomm 8-bit compressed */ +#define V4L2_PIX_FMT_QC10C v4l2_fourcc('Q', '1', '0', 'C') /* Qualcomm 10-bit compressed */ +#define V4L2_PIX_FMT_AJPG v4l2_fourcc('A', 'J', 'P', 'G') /* Aspeed JPEG */ +#define V4L2_PIX_FMT_HEXTILE v4l2_fourcc('H', 'X', 'T', 'L') /* Hextile compressed */ + +/* 10bit raw packed, 32 bytes for every 25 pixels, last LSB 6 bits unused */ +#define V4L2_PIX_FMT_IPU3_SBGGR10 v4l2_fourcc('i', 'p', '3', 'b') /* IPU3 packed 10-bit BGGR bayer */ +#define V4L2_PIX_FMT_IPU3_SGBRG10 v4l2_fourcc('i', 'p', '3', 'g') /* IPU3 packed 10-bit 
GBRG bayer */ +#define V4L2_PIX_FMT_IPU3_SGRBG10 v4l2_fourcc('i', 'p', '3', 'G') /* IPU3 packed 10-bit GRBG bayer */ +#define V4L2_PIX_FMT_IPU3_SRGGB10 v4l2_fourcc('i', 'p', '3', 'r') /* IPU3 packed 10-bit RGGB bayer */ + +/* Raspberry Pi PiSP compressed formats. */ +#define V4L2_PIX_FMT_PISP_COMP1_RGGB v4l2_fourcc('P', 'C', '1', 'R') /* PiSP 8-bit mode 1 compressed RGGB bayer */ +#define V4L2_PIX_FMT_PISP_COMP1_GRBG v4l2_fourcc('P', 'C', '1', 'G') /* PiSP 8-bit mode 1 compressed GRBG bayer */ +#define V4L2_PIX_FMT_PISP_COMP1_GBRG v4l2_fourcc('P', 'C', '1', 'g') /* PiSP 8-bit mode 1 compressed GBRG bayer */ +#define V4L2_PIX_FMT_PISP_COMP1_BGGR v4l2_fourcc('P', 'C', '1', 'B') /* PiSP 8-bit mode 1 compressed BGGR bayer */ +#define V4L2_PIX_FMT_PISP_COMP1_MONO v4l2_fourcc('P', 'C', '1', 'M') /* PiSP 8-bit mode 1 compressed monochrome */ +#define V4L2_PIX_FMT_PISP_COMP2_RGGB v4l2_fourcc('P', 'C', '2', 'R') /* PiSP 8-bit mode 2 compressed RGGB bayer */ +#define V4L2_PIX_FMT_PISP_COMP2_GRBG v4l2_fourcc('P', 'C', '2', 'G') /* PiSP 8-bit mode 2 compressed GRBG bayer */ +#define V4L2_PIX_FMT_PISP_COMP2_GBRG v4l2_fourcc('P', 'C', '2', 'g') /* PiSP 8-bit mode 2 compressed GBRG bayer */ +#define V4L2_PIX_FMT_PISP_COMP2_BGGR v4l2_fourcc('P', 'C', '2', 'B') /* PiSP 8-bit mode 2 compressed BGGR bayer */ +#define V4L2_PIX_FMT_PISP_COMP2_MONO v4l2_fourcc('P', 'C', '2', 'M') /* PiSP 8-bit mode 2 compressed monochrome */ + +/* SDR formats - used only for Software Defined Radio devices */ +#define V4L2_SDR_FMT_CU8 v4l2_fourcc('C', 'U', '0', '8') /* IQ u8 */ +#define V4L2_SDR_FMT_CU16LE v4l2_fourcc('C', 'U', '1', '6') /* IQ u16le */ +#define V4L2_SDR_FMT_CS8 v4l2_fourcc('C', 'S', '0', '8') /* complex s8 */ +#define V4L2_SDR_FMT_CS14LE v4l2_fourcc('C', 'S', '1', '4') /* complex s14le */ +#define V4L2_SDR_FMT_RU12LE v4l2_fourcc('R', 'U', '1', '2') /* real u12le */ +#define V4L2_SDR_FMT_PCU16BE v4l2_fourcc('P', 'C', '1', '6') /* planar complex u16be */ +#define V4L2_SDR_FMT_PCU18BE v4l2_fourcc('P', 'C', '1', '8') /* planar complex u18be */ +#define V4L2_SDR_FMT_PCU20BE v4l2_fourcc('P', 'C', '2', '0') /* planar complex u20be */ + +/* Touch formats - used for Touch devices */ +#define V4L2_TCH_FMT_DELTA_TD16 v4l2_fourcc('T', 'D', '1', '6') /* 16-bit signed deltas */ +#define V4L2_TCH_FMT_DELTA_TD08 v4l2_fourcc('T', 'D', '0', '8') /* 8-bit signed deltas */ +#define V4L2_TCH_FMT_TU16 v4l2_fourcc('T', 'U', '1', '6') /* 16-bit unsigned touch data */ +#define V4L2_TCH_FMT_TU08 v4l2_fourcc('T', 'U', '0', '8') /* 8-bit unsigned touch data */ + +/* Meta-data formats */ +#define V4L2_META_FMT_VSP1_HGO v4l2_fourcc('V', 'S', 'P', 'H') /* R-Car VSP1 1-D Histogram */ +#define V4L2_META_FMT_VSP1_HGT v4l2_fourcc('V', 'S', 'P', 'T') /* R-Car VSP1 2-D Histogram */ +#define V4L2_META_FMT_UVC v4l2_fourcc('U', 'V', 'C', 'H') /* UVC Payload Header metadata */ +#define V4L2_META_FMT_D4XX v4l2_fourcc('D', '4', 'X', 'X') /* D4XX Payload Header metadata */ +#define V4L2_META_FMT_VIVID v4l2_fourcc('V', 'I', 'V', 'D') /* Vivid Metadata */ +#define V4L2_META_FMT_SENSOR_DATA v4l2_fourcc('S', 'E', 'N', 'S') /* Sensor Ancillary metadata */ +#define V4L2_META_FMT_BCM2835_ISP_STATS v4l2_fourcc('B', 'S', 'T', 'A') /* BCM2835 ISP image statistics output */ + +/* Vendor specific - used for RK_ISP1 camera sub-system */ +#define V4L2_META_FMT_RK_ISP1_PARAMS v4l2_fourcc('R', 'K', '1', 'P') /* Rockchip ISP1 3A Parameters */ +#define V4L2_META_FMT_RK_ISP1_STAT_3A v4l2_fourcc('R', 'K', '1', 'S') /* Rockchip ISP1 3A Statistics */ + +/* The metadata format 
identifier for BE configuration buffers. */ +#define V4L2_META_FMT_RPI_BE_CFG v4l2_fourcc('R', 'P', 'B', 'C') + +/* The metadata format identifier for FE configuration buffers. */ +#define V4L2_META_FMT_RPI_FE_CFG v4l2_fourcc('R', 'P', 'F', 'C') + +/* The metadata format identifier for FE stats buffers. */ +#define V4L2_META_FMT_RPI_FE_STATS v4l2_fourcc('R', 'P', 'F', 'S') + +/* priv field value to indicates that subsequent fields are valid. */ +#define V4L2_PIX_FMT_PRIV_MAGIC 0xfeedcafe + +/* Flags */ +#define V4L2_PIX_FMT_FLAG_PREMUL_ALPHA 0x00000001 +#define V4L2_PIX_FMT_FLAG_SET_CSC 0x00000002 + +/* + * F O R M A T E N U M E R A T I O N + */ +struct v4l2_fmtdesc { + __u32 index; /* Format number */ + __u32 type; /* enum v4l2_buf_type */ + __u32 flags; + __u8 description[32]; /* Description string */ + __u32 pixelformat; /* Format fourcc */ + __u32 mbus_code; /* Media bus code */ + __u32 reserved[3]; +}; + +#define V4L2_FMT_FLAG_COMPRESSED 0x0001 +#define V4L2_FMT_FLAG_EMULATED 0x0002 +#define V4L2_FMT_FLAG_CONTINUOUS_BYTESTREAM 0x0004 +#define V4L2_FMT_FLAG_DYN_RESOLUTION 0x0008 +#define V4L2_FMT_FLAG_ENC_CAP_FRAME_INTERVAL 0x0010 +#define V4L2_FMT_FLAG_CSC_COLORSPACE 0x0020 +#define V4L2_FMT_FLAG_CSC_XFER_FUNC 0x0040 +#define V4L2_FMT_FLAG_CSC_YCBCR_ENC 0x0080 +#define V4L2_FMT_FLAG_CSC_HSV_ENC V4L2_FMT_FLAG_CSC_YCBCR_ENC +#define V4L2_FMT_FLAG_CSC_QUANTIZATION 0x0100 +#define V4L2_FMT_FLAG_META_LINE_BASED 0x0200 + + /* Frame Size and frame rate enumeration */ +/* + * F R A M E S I Z E E N U M E R A T I O N + */ +enum v4l2_frmsizetypes { + V4L2_FRMSIZE_TYPE_DISCRETE = 1, + V4L2_FRMSIZE_TYPE_CONTINUOUS = 2, + V4L2_FRMSIZE_TYPE_STEPWISE = 3, +}; + +struct v4l2_frmsize_discrete { + __u32 width; /* Frame width [pixel] */ + __u32 height; /* Frame height [pixel] */ +}; + +struct v4l2_frmsize_stepwise { + __u32 min_width; /* Minimum frame width [pixel] */ + __u32 max_width; /* Maximum frame width [pixel] */ + __u32 step_width; /* Frame width step size [pixel] */ + __u32 min_height; /* Minimum frame height [pixel] */ + __u32 max_height; /* Maximum frame height [pixel] */ + __u32 step_height; /* Frame height step size [pixel] */ +}; + +struct v4l2_frmsizeenum { + __u32 index; /* Frame size number */ + __u32 pixel_format; /* Pixel format */ + __u32 type; /* Frame size type the device supports. */ + + union { /* Frame size */ + struct v4l2_frmsize_discrete discrete; + struct v4l2_frmsize_stepwise stepwise; + }; + + __u32 reserved[2]; /* Reserved space for future use */ +}; + +/* + * F R A M E R A T E E N U M E R A T I O N + */ +enum v4l2_frmivaltypes { + V4L2_FRMIVAL_TYPE_DISCRETE = 1, + V4L2_FRMIVAL_TYPE_CONTINUOUS = 2, + V4L2_FRMIVAL_TYPE_STEPWISE = 3, +}; + +struct v4l2_frmival_stepwise { + struct v4l2_fract min; /* Minimum frame interval [s] */ + struct v4l2_fract max; /* Maximum frame interval [s] */ + struct v4l2_fract step; /* Frame interval step size [s] */ +}; + +struct v4l2_frmivalenum { + __u32 index; /* Frame format index */ + __u32 pixel_format; /* Pixel format */ + __u32 width; /* Frame width */ + __u32 height; /* Frame height */ + __u32 type; /* Frame interval type the device supports. 
*/ + + union { /* Frame interval */ + struct v4l2_fract discrete; + struct v4l2_frmival_stepwise stepwise; + }; + + __u32 reserved[2]; /* Reserved space for future use */ +}; + +/* + * T I M E C O D E + */ +struct v4l2_timecode { + __u32 type; + __u32 flags; + __u8 frames; + __u8 seconds; + __u8 minutes; + __u8 hours; + __u8 userbits[4]; +}; + +/* Type */ +#define V4L2_TC_TYPE_24FPS 1 +#define V4L2_TC_TYPE_25FPS 2 +#define V4L2_TC_TYPE_30FPS 3 +#define V4L2_TC_TYPE_50FPS 4 +#define V4L2_TC_TYPE_60FPS 5 + +/* Flags */ +#define V4L2_TC_FLAG_DROPFRAME 0x0001 /* "drop-frame" mode */ +#define V4L2_TC_FLAG_COLORFRAME 0x0002 +#define V4L2_TC_USERBITS_field 0x000C +#define V4L2_TC_USERBITS_USERDEFINED 0x0000 +#define V4L2_TC_USERBITS_8BITCHARS 0x0008 +/* The above is based on SMPTE timecodes */ + +struct v4l2_jpegcompression { + int quality; + + int APPn; /* Number of APP segment to be written, + * must be 0..15 */ + int APP_len; /* Length of data in JPEG APPn segment */ + char APP_data[60]; /* Data in the JPEG APPn segment. */ + + int COM_len; /* Length of data in JPEG COM segment */ + char COM_data[60]; /* Data in JPEG COM segment */ + + __u32 jpeg_markers; /* Which markers should go into the JPEG + * output. Unless you exactly know what + * you do, leave them untouched. + * Including less markers will make the + * resulting code smaller, but there will + * be fewer applications which can read it. + * The presence of the APP and COM marker + * is influenced by APP_len and COM_len + * ONLY, not by this property! */ + +#define V4L2_JPEG_MARKER_DHT (1<<3) /* Define Huffman Tables */ +#define V4L2_JPEG_MARKER_DQT (1<<4) /* Define Quantization Tables */ +#define V4L2_JPEG_MARKER_DRI (1<<5) /* Define Restart Interval */ +#define V4L2_JPEG_MARKER_COM (1<<6) /* Comment segment */ +#define V4L2_JPEG_MARKER_APP (1<<7) /* App segment, driver will + * always use APP0 */ +}; + +/* + * M E M O R Y - M A P P I N G B U F F E R S + */ + + +struct v4l2_requestbuffers { + __u32 count; + __u32 type; /* enum v4l2_buf_type */ + __u32 memory; /* enum v4l2_memory */ + __u32 capabilities; + __u8 flags; + __u8 reserved[3]; +}; + +#define V4L2_MEMORY_FLAG_NON_COHERENT (1 << 0) + +/* capabilities for struct v4l2_requestbuffers and v4l2_create_buffers */ +#define V4L2_BUF_CAP_SUPPORTS_MMAP (1 << 0) +#define V4L2_BUF_CAP_SUPPORTS_USERPTR (1 << 1) +#define V4L2_BUF_CAP_SUPPORTS_DMABUF (1 << 2) +#define V4L2_BUF_CAP_SUPPORTS_REQUESTS (1 << 3) +#define V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS (1 << 4) +#define V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF (1 << 5) +#define V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS (1 << 6) +#define V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS (1 << 7) +#define V4L2_BUF_CAP_SUPPORTS_REMOVE_BUFS (1 << 8) + +/** + * struct v4l2_plane - plane info for multi-planar buffers + * @bytesused: number of bytes occupied by data in the plane (payload) + * @length: size of this plane (NOT the payload) in bytes + * @m.mem_offset: when memory in the associated struct v4l2_buffer is + * V4L2_MEMORY_MMAP, equals the offset from the start of + * the device memory for this plane (or is a "cookie" that + * should be passed to mmap() called on the video node) + * @m.userptr: when memory is V4L2_MEMORY_USERPTR, a userspace pointer + * pointing to this plane + * @m.fd: when memory is V4L2_MEMORY_DMABUF, a userspace file + * descriptor associated with this plane + * @m: union of @mem_offset, @userptr and @fd + * @data_offset: offset in the plane to the start of data; usually 0, + * unless there is a header in front of the data + * 
@reserved: drivers and applications must zero this array + * + * Multi-planar buffers consist of one or more planes, e.g. an YCbCr buffer + * with two planes can have one plane for Y, and another for interleaved CbCr + * components. Each plane can reside in a separate memory buffer, or even in + * a completely separate memory node (e.g. in embedded devices). + */ +struct v4l2_plane { + __u32 bytesused; + __u32 length; + union { + __u32 mem_offset; + unsigned long userptr; + __s32 fd; + } m; + __u32 data_offset; + __u32 reserved[11]; +}; + +/** + * struct v4l2_buffer - video buffer info + * @index: id number of the buffer + * @type: enum v4l2_buf_type; buffer type (type == *_MPLANE for + * multiplanar buffers); + * @bytesused: number of bytes occupied by data in the buffer (payload); + * unused (set to 0) for multiplanar buffers + * @flags: buffer informational flags + * @field: enum v4l2_field; field order of the image in the buffer + * @timestamp: frame timestamp + * @timecode: frame timecode + * @sequence: sequence count of this frame + * @memory: enum v4l2_memory; the method, in which the actual video data is + * passed + * @m.offset: for non-multiplanar buffers with memory == V4L2_MEMORY_MMAP; + * offset from the start of the device memory for this plane, + * (or a "cookie" that should be passed to mmap() as offset) + * @m.userptr: for non-multiplanar buffers with memory == V4L2_MEMORY_USERPTR; + * a userspace pointer pointing to this buffer + * @m.fd: for non-multiplanar buffers with memory == V4L2_MEMORY_DMABUF; + * a userspace file descriptor associated with this buffer + * @m.planes: for multiplanar buffers; userspace pointer to the array of plane + * info structs for this buffer + * @m: union of @offset, @userptr, @planes and @fd + * @length: size in bytes of the buffer (NOT its payload) for single-plane + * buffers (when type != *_MPLANE); number of elements in the + * planes array for multi-plane buffers + * @reserved2: drivers and applications must zero this field + * @request_fd: fd of the request that this buffer should use + * @reserved: for backwards compatibility with applications that do not know + * about @request_fd + * + * Contains data exchanged by application and driver using one of the Streaming + * I/O methods. + */ +struct v4l2_buffer { + __u32 index; + __u32 type; + __u32 bytesused; + __u32 flags; + __u32 field; + struct timeval timestamp; + struct v4l2_timecode timecode; + __u32 sequence; + + /* memory location */ + __u32 memory; + union { + __u32 offset; + unsigned long userptr; + struct v4l2_plane *planes; + __s32 fd; + } m; + __u32 length; + __u32 reserved2; + union { + __s32 request_fd; + __u32 reserved; + }; +}; + +/** + * v4l2_timeval_to_ns - Convert timeval to nanoseconds + * @tv: pointer to the timeval variable to be converted + * + * Returns the scalar nanosecond representation of the timeval + * parameter. 
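+ *
+ * For example, a timestamp of { .tv_sec = 1, .tv_usec = 500000 }
+ * converts to 1 * 1000000000 + 500000 * 1000 = 1500000000 ns, i.e. 1.5 s.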
+ */ +static __inline__ __u64 v4l2_timeval_to_ns(const struct timeval *tv) +{ + return (__u64)tv->tv_sec * 1000000000ULL + tv->tv_usec * 1000; +} + +/* Flags for 'flags' field */ +/* Buffer is mapped (flag) */ +#define V4L2_BUF_FLAG_MAPPED 0x00000001 +/* Buffer is queued for processing */ +#define V4L2_BUF_FLAG_QUEUED 0x00000002 +/* Buffer is ready */ +#define V4L2_BUF_FLAG_DONE 0x00000004 +/* Image is a keyframe (I-frame) */ +#define V4L2_BUF_FLAG_KEYFRAME 0x00000008 +/* Image is a P-frame */ +#define V4L2_BUF_FLAG_PFRAME 0x00000010 +/* Image is a B-frame */ +#define V4L2_BUF_FLAG_BFRAME 0x00000020 +/* Buffer is ready, but the data contained within is corrupted. */ +#define V4L2_BUF_FLAG_ERROR 0x00000040 +/* Buffer is added to an unqueued request */ +#define V4L2_BUF_FLAG_IN_REQUEST 0x00000080 +/* timecode field is valid */ +#define V4L2_BUF_FLAG_TIMECODE 0x00000100 +/* Don't return the capture buffer until OUTPUT timestamp changes */ +#define V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF 0x00000200 +/* Buffer is prepared for queuing */ +#define V4L2_BUF_FLAG_PREPARED 0x00000400 +/* Cache handling flags */ +#define V4L2_BUF_FLAG_NO_CACHE_INVALIDATE 0x00000800 +#define V4L2_BUF_FLAG_NO_CACHE_CLEAN 0x00001000 +/* Timestamp type */ +#define V4L2_BUF_FLAG_TIMESTAMP_MASK 0x0000e000 +#define V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN 0x00000000 +#define V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC 0x00002000 +#define V4L2_BUF_FLAG_TIMESTAMP_COPY 0x00004000 +/* Timestamp sources. */ +#define V4L2_BUF_FLAG_TSTAMP_SRC_MASK 0x00070000 +#define V4L2_BUF_FLAG_TSTAMP_SRC_EOF 0x00000000 +#define V4L2_BUF_FLAG_TSTAMP_SRC_SOE 0x00010000 +/* mem2mem encoder/decoder */ +#define V4L2_BUF_FLAG_LAST 0x00100000 +/* request_fd is valid */ +#define V4L2_BUF_FLAG_REQUEST_FD 0x00800000 + +/** + * struct v4l2_exportbuffer - export of video buffer as DMABUF file descriptor + * + * @index: id number of the buffer + * @type: enum v4l2_buf_type; buffer type (type == *_MPLANE for + * multiplanar buffers); + * @plane: index of the plane to be exported, 0 for single plane queues + * @flags: flags for newly created file, currently only O_CLOEXEC is + * supported, refer to manual of open syscall for more details + * @fd: file descriptor associated with DMABUF (set by driver) + * @reserved: drivers and applications must zero this array + * + * Contains data used for exporting a video buffer as DMABUF file descriptor. + * The buffer is identified by a 'cookie' returned by VIDIOC_QUERYBUF + * (identical to the cookie used to mmap() the buffer to userspace). All + * reserved fields must be set to zero. The field reserved0 is expected to + * become a structure 'type' allowing an alternative layout of the structure + * content. Therefore this field should not be used for any other extensions. + */ +struct v4l2_exportbuffer { + __u32 type; /* enum v4l2_buf_type */ + __u32 index; + __u32 plane; + __u32 flags; + __s32 fd; + __u32 reserved[11]; +}; + +/* + * O V E R L A Y P R E V I E W + */ +struct v4l2_framebuffer { + __u32 capability; + __u32 flags; +/* FIXME: in theory we should pass something like PCI device + memory + * region + offset instead of some physical address */ + void *base; + struct { + __u32 width; + __u32 height; + __u32 pixelformat; + __u32 field; /* enum v4l2_field */ + __u32 bytesperline; /* for padding, zero if unused */ + __u32 sizeimage; + __u32 colorspace; /* enum v4l2_colorspace */ + __u32 priv; /* reserved field, set to 0 */ + } fmt; +}; +/* Flags for the 'capability' field. 
Read only */ +#define V4L2_FBUF_CAP_EXTERNOVERLAY 0x0001 +#define V4L2_FBUF_CAP_CHROMAKEY 0x0002 +#define V4L2_FBUF_CAP_LIST_CLIPPING 0x0004 +#define V4L2_FBUF_CAP_BITMAP_CLIPPING 0x0008 +#define V4L2_FBUF_CAP_LOCAL_ALPHA 0x0010 +#define V4L2_FBUF_CAP_GLOBAL_ALPHA 0x0020 +#define V4L2_FBUF_CAP_LOCAL_INV_ALPHA 0x0040 +#define V4L2_FBUF_CAP_SRC_CHROMAKEY 0x0080 +/* Flags for the 'flags' field. */ +#define V4L2_FBUF_FLAG_PRIMARY 0x0001 +#define V4L2_FBUF_FLAG_OVERLAY 0x0002 +#define V4L2_FBUF_FLAG_CHROMAKEY 0x0004 +#define V4L2_FBUF_FLAG_LOCAL_ALPHA 0x0008 +#define V4L2_FBUF_FLAG_GLOBAL_ALPHA 0x0010 +#define V4L2_FBUF_FLAG_LOCAL_INV_ALPHA 0x0020 +#define V4L2_FBUF_FLAG_SRC_CHROMAKEY 0x0040 + +struct v4l2_clip { + struct v4l2_rect c; + struct v4l2_clip *next; +}; + +struct v4l2_window { + struct v4l2_rect w; + __u32 field; /* enum v4l2_field */ + __u32 chromakey; + struct v4l2_clip *clips; + __u32 clipcount; + void *bitmap; + __u8 global_alpha; +}; + +/* + * C A P T U R E P A R A M E T E R S + */ +struct v4l2_captureparm { + __u32 capability; /* Supported modes */ + __u32 capturemode; /* Current mode */ + struct v4l2_fract timeperframe; /* Time per frame in seconds */ + __u32 extendedmode; /* Driver-specific extensions */ + __u32 readbuffers; /* # of buffers for read */ + __u32 reserved[4]; +}; + +/* Flags for 'capability' and 'capturemode' fields */ +#define V4L2_MODE_HIGHQUALITY 0x0001 /* High quality imaging mode */ +#define V4L2_CAP_TIMEPERFRAME 0x1000 /* timeperframe field is supported */ + +struct v4l2_outputparm { + __u32 capability; /* Supported modes */ + __u32 outputmode; /* Current mode */ + struct v4l2_fract timeperframe; /* Time per frame in seconds */ + __u32 extendedmode; /* Driver-specific extensions */ + __u32 writebuffers; /* # of buffers for write */ + __u32 reserved[4]; +}; + +/* + * I N P U T I M A G E C R O P P I N G + */ +struct v4l2_cropcap { + __u32 type; /* enum v4l2_buf_type */ + struct v4l2_rect bounds; + struct v4l2_rect defrect; + struct v4l2_fract pixelaspect; +}; + +struct v4l2_crop { + __u32 type; /* enum v4l2_buf_type */ + struct v4l2_rect c; +}; + +/** + * struct v4l2_selection - selection info + * @type: buffer type (do not use *_MPLANE types) + * @target: Selection target, used to choose one of possible rectangles; + * defined in v4l2-common.h; V4L2_SEL_TGT_* . + * @flags: constraints flags, defined in v4l2-common.h; V4L2_SEL_FLAG_*. + * @r: coordinates of selection window + * @reserved: for future use, rounds structure size to 64 bytes, set to zero + * + * Hardware may use multiple helper windows to process a video stream. + * The structure is used to exchange this selection areas between + * an application and a driver. + */ +struct v4l2_selection { + __u32 type; + __u32 target; + __u32 flags; + struct v4l2_rect r; + __u32 reserved[9]; +}; + + +/* + * A N A L O G V I D E O S T A N D A R D + */ + +typedef __u64 v4l2_std_id; + +/* + * Attention: Keep the V4L2_STD_* bit definitions in sync with + * include/dt-bindings/display/sdtv-standards.h SDTV_STD_* bit definitions. 
+ */ +/* one bit for each */ +#define V4L2_STD_PAL_B ((v4l2_std_id)0x00000001) +#define V4L2_STD_PAL_B1 ((v4l2_std_id)0x00000002) +#define V4L2_STD_PAL_G ((v4l2_std_id)0x00000004) +#define V4L2_STD_PAL_H ((v4l2_std_id)0x00000008) +#define V4L2_STD_PAL_I ((v4l2_std_id)0x00000010) +#define V4L2_STD_PAL_D ((v4l2_std_id)0x00000020) +#define V4L2_STD_PAL_D1 ((v4l2_std_id)0x00000040) +#define V4L2_STD_PAL_K ((v4l2_std_id)0x00000080) + +#define V4L2_STD_PAL_M ((v4l2_std_id)0x00000100) +#define V4L2_STD_PAL_N ((v4l2_std_id)0x00000200) +#define V4L2_STD_PAL_Nc ((v4l2_std_id)0x00000400) +#define V4L2_STD_PAL_60 ((v4l2_std_id)0x00000800) + +#define V4L2_STD_NTSC_M ((v4l2_std_id)0x00001000) /* BTSC */ +#define V4L2_STD_NTSC_M_JP ((v4l2_std_id)0x00002000) /* EIA-J */ +#define V4L2_STD_NTSC_443 ((v4l2_std_id)0x00004000) +#define V4L2_STD_NTSC_M_KR ((v4l2_std_id)0x00008000) /* FM A2 */ + +#define V4L2_STD_SECAM_B ((v4l2_std_id)0x00010000) +#define V4L2_STD_SECAM_D ((v4l2_std_id)0x00020000) +#define V4L2_STD_SECAM_G ((v4l2_std_id)0x00040000) +#define V4L2_STD_SECAM_H ((v4l2_std_id)0x00080000) +#define V4L2_STD_SECAM_K ((v4l2_std_id)0x00100000) +#define V4L2_STD_SECAM_K1 ((v4l2_std_id)0x00200000) +#define V4L2_STD_SECAM_L ((v4l2_std_id)0x00400000) +#define V4L2_STD_SECAM_LC ((v4l2_std_id)0x00800000) + +/* ATSC/HDTV */ +#define V4L2_STD_ATSC_8_VSB ((v4l2_std_id)0x01000000) +#define V4L2_STD_ATSC_16_VSB ((v4l2_std_id)0x02000000) + +/* FIXME: + Although std_id is 64 bits, there is an issue on PPC32 architecture that + makes switch(__u64) to break. So, there's a hack on v4l2-common.c rounding + this value to 32 bits. + As, currently, the max value is for V4L2_STD_ATSC_16_VSB (30 bits wide), + it should work fine. However, if needed to add more than two standards, + v4l2-common.c should be fixed. + */ + +/* + * Some macros to merge video standards in order to make live easier for the + * drivers and V4L2 applications + */ + +/* + * "Common" NTSC/M - It should be noticed that V4L2_STD_NTSC_443 is + * Missing here. + */ +#define V4L2_STD_NTSC (V4L2_STD_NTSC_M |\ + V4L2_STD_NTSC_M_JP |\ + V4L2_STD_NTSC_M_KR) +/* Secam macros */ +#define V4L2_STD_SECAM_DK (V4L2_STD_SECAM_D |\ + V4L2_STD_SECAM_K |\ + V4L2_STD_SECAM_K1) +/* All Secam Standards */ +#define V4L2_STD_SECAM (V4L2_STD_SECAM_B |\ + V4L2_STD_SECAM_G |\ + V4L2_STD_SECAM_H |\ + V4L2_STD_SECAM_DK |\ + V4L2_STD_SECAM_L |\ + V4L2_STD_SECAM_LC) +/* PAL macros */ +#define V4L2_STD_PAL_BG (V4L2_STD_PAL_B |\ + V4L2_STD_PAL_B1 |\ + V4L2_STD_PAL_G) +#define V4L2_STD_PAL_DK (V4L2_STD_PAL_D |\ + V4L2_STD_PAL_D1 |\ + V4L2_STD_PAL_K) +/* + * "Common" PAL - This macro is there to be compatible with the old + * V4L1 concept of "PAL": /BGDKHI. 
+ * Several PAL standards are missing here: /M, /N and /Nc + */ +#define V4L2_STD_PAL (V4L2_STD_PAL_BG |\ + V4L2_STD_PAL_DK |\ + V4L2_STD_PAL_H |\ + V4L2_STD_PAL_I) +/* Chroma "agnostic" standards */ +#define V4L2_STD_B (V4L2_STD_PAL_B |\ + V4L2_STD_PAL_B1 |\ + V4L2_STD_SECAM_B) +#define V4L2_STD_G (V4L2_STD_PAL_G |\ + V4L2_STD_SECAM_G) +#define V4L2_STD_H (V4L2_STD_PAL_H |\ + V4L2_STD_SECAM_H) +#define V4L2_STD_L (V4L2_STD_SECAM_L |\ + V4L2_STD_SECAM_LC) +#define V4L2_STD_GH (V4L2_STD_G |\ + V4L2_STD_H) +#define V4L2_STD_DK (V4L2_STD_PAL_DK |\ + V4L2_STD_SECAM_DK) +#define V4L2_STD_BG (V4L2_STD_B |\ + V4L2_STD_G) +#define V4L2_STD_MN (V4L2_STD_PAL_M |\ + V4L2_STD_PAL_N |\ + V4L2_STD_PAL_Nc |\ + V4L2_STD_NTSC) + +/* Standards where MTS/BTSC stereo could be found */ +#define V4L2_STD_MTS (V4L2_STD_NTSC_M |\ + V4L2_STD_PAL_M |\ + V4L2_STD_PAL_N |\ + V4L2_STD_PAL_Nc) + +/* Standards for Countries with 60Hz Line frequency */ +#define V4L2_STD_525_60 (V4L2_STD_PAL_M |\ + V4L2_STD_PAL_60 |\ + V4L2_STD_NTSC |\ + V4L2_STD_NTSC_443) +/* Standards for Countries with 50Hz Line frequency */ +#define V4L2_STD_625_50 (V4L2_STD_PAL |\ + V4L2_STD_PAL_N |\ + V4L2_STD_PAL_Nc |\ + V4L2_STD_SECAM) + +#define V4L2_STD_ATSC (V4L2_STD_ATSC_8_VSB |\ + V4L2_STD_ATSC_16_VSB) +/* Macros with none and all analog standards */ +#define V4L2_STD_UNKNOWN 0 +#define V4L2_STD_ALL (V4L2_STD_525_60 |\ + V4L2_STD_625_50) + +struct v4l2_standard { + __u32 index; + v4l2_std_id id; + __u8 name[24]; + struct v4l2_fract frameperiod; /* Frames, not fields */ + __u32 framelines; + __u32 reserved[4]; +}; + +/* + * D V B T T I M I N G S + */ + +/** struct v4l2_bt_timings - BT.656/BT.1120 timing data + * @width: total width of the active video in pixels + * @height: total height of the active video in lines + * @interlaced: Interlaced or progressive + * @polarities: Positive or negative polarities + * @pixelclock: Pixel clock in HZ. Ex. 74.25MHz->74250000 + * @hfrontporch:Horizontal front porch in pixels + * @hsync: Horizontal Sync length in pixels + * @hbackporch: Horizontal back porch in pixels + * @vfrontporch:Vertical front porch in lines + * @vsync: Vertical Sync length in lines + * @vbackporch: Vertical back porch in lines + * @il_vfrontporch:Vertical front porch for the even field + * (aka field 2) of interlaced field formats + * @il_vsync: Vertical Sync length for the even field + * (aka field 2) of interlaced field formats + * @il_vbackporch:Vertical back porch for the even field + * (aka field 2) of interlaced field formats + * @standards: Standards the timing belongs to + * @flags: Flags + * @picture_aspect: The picture aspect ratio (hor/vert). + * @cea861_vic: VIC code as per the CEA-861 standard. + * @hdmi_vic: VIC code as per the HDMI standard. + * @reserved: Reserved fields, must be zeroed. + * + * A note regarding vertical interlaced timings: height refers to the total + * height of the active video frame (= two fields). The blanking timings refer + * to the blanking of each field. So the height of the total frame is + * calculated as follows: + * + * tot_height = height + vfrontporch + vsync + vbackporch + + * il_vfrontporch + il_vsync + il_vbackporch + * + * The active height of each field is height / 2. 
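To make the interlaced height formula concrete, here is a worked example. The vertical blanking values are illustrative numbers typical of a 1080i timing, not constants defined by this header:

/* Worked example of the tot_height formula for an interlaced timing. */
static unsigned int example_tot_height(void)
{
	const unsigned int height = 1080;                      /* active lines per frame */
	const unsigned int vfp = 2, vs = 5, vbp = 15;          /* field 1 blanking */
	const unsigned int il_vfp = 2, il_vs = 5, il_vbp = 16; /* field 2 blanking */

	/* 1080 + (2 + 5 + 15) + (2 + 5 + 16) == 1125 total lines,
	 * and each field carries height / 2 == 540 active lines. */
	return height + vfp + vs + vbp + il_vfp + il_vs + il_vbp;
}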
+ */ +struct v4l2_bt_timings { + __u32 width; + __u32 height; + __u32 interlaced; + __u32 polarities; + __u64 pixelclock; + __u32 hfrontporch; + __u32 hsync; + __u32 hbackporch; + __u32 vfrontporch; + __u32 vsync; + __u32 vbackporch; + __u32 il_vfrontporch; + __u32 il_vsync; + __u32 il_vbackporch; + __u32 standards; + __u32 flags; + struct v4l2_fract picture_aspect; + __u8 cea861_vic; + __u8 hdmi_vic; + __u8 reserved[46]; +} __attribute__ ((packed)); + +/* Interlaced or progressive format */ +#define V4L2_DV_PROGRESSIVE 0 +#define V4L2_DV_INTERLACED 1 + +/* Polarities. If bit is not set, it is assumed to be negative polarity */ +#define V4L2_DV_VSYNC_POS_POL 0x00000001 +#define V4L2_DV_HSYNC_POS_POL 0x00000002 + +/* Timings standards */ +#define V4L2_DV_BT_STD_CEA861 (1 << 0) /* CEA-861 Digital TV Profile */ +#define V4L2_DV_BT_STD_DMT (1 << 1) /* VESA Discrete Monitor Timings */ +#define V4L2_DV_BT_STD_CVT (1 << 2) /* VESA Coordinated Video Timings */ +#define V4L2_DV_BT_STD_GTF (1 << 3) /* VESA Generalized Timings Formula */ +#define V4L2_DV_BT_STD_SDI (1 << 4) /* SDI Timings */ + +/* Flags */ + +/* + * CVT/GTF specific: timing uses reduced blanking (CVT) or the 'Secondary + * GTF' curve (GTF). In both cases the horizontal and/or vertical blanking + * intervals are reduced, allowing a higher resolution over the same + * bandwidth. This is a read-only flag. + */ +#define V4L2_DV_FL_REDUCED_BLANKING (1 << 0) +/* + * CEA-861 specific: set for CEA-861 formats with a framerate of a multiple + * of six. These formats can be optionally played at 1 / 1.001 speed. + * This is a read-only flag. + */ +#define V4L2_DV_FL_CAN_REDUCE_FPS (1 << 1) +/* + * CEA-861 specific: only valid for video transmitters, the flag is cleared + * by receivers. + * If the framerate of the format is a multiple of six, then the pixelclock + * used to set up the transmitter is divided by 1.001 to make it compatible + * with 60 Hz based standards such as NTSC and PAL-M that use a framerate of + * 29.97 Hz. Otherwise this flag is cleared. If the transmitter can't generate + * such frequencies, then the flag will also be cleared. + */ +#define V4L2_DV_FL_REDUCED_FPS (1 << 2) +/* + * Specific to interlaced formats: if set, then field 1 is really one half-line + * longer and field 2 is really one half-line shorter, so each field has + * exactly the same number of half-lines. Whether half-lines can be detected + * or used depends on the hardware. + */ +#define V4L2_DV_FL_HALF_LINE (1 << 3) +/* + * If set, then this is a Consumer Electronics (CE) video format. Such formats + * differ from other formats (commonly called IT formats) in that if RGB + * encoding is used then by default the RGB values use limited range (i.e. + * use the range 16-235) as opposed to 0-255. All formats defined in CEA-861 + * except for the 640x480 format are CE formats. + */ +#define V4L2_DV_FL_IS_CE_VIDEO (1 << 4) +/* Some formats like SMPTE-125M have an interlaced signal with a odd + * total height. For these formats, if this flag is set, the first + * field has the extra line. If not, it is the second field. + */ +#define V4L2_DV_FL_FIRST_FIELD_EXTRA_LINE (1 << 5) +/* + * If set, then the picture_aspect field is valid. Otherwise assume that the + * pixels are square, so the picture aspect ratio is the same as the width to + * height ratio. + */ +#define V4L2_DV_FL_HAS_PICTURE_ASPECT (1 << 6) +/* + * If set, then the cea861_vic field is valid and contains the Video + * Identification Code as per the CEA-861 standard. 
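Tying the timings together: a receiver can derive the refresh rate of a detected signal from the pixel clock and the total frame size, using struct v4l2_dv_timings and the V4L2_DV_BT_FRAME_* helper macros defined a little further below. A sketch, assuming fd is a video node that implements VIDIOC_QUERY_DV_TIMINGS:

/* Sketch: compute frames per second from a queried DV timing. */
static double dv_timing_fps(int fd)
{
	struct v4l2_dv_timings timings = { 0 };

	if (ioctl(fd, VIDIOC_QUERY_DV_TIMINGS, &timings) < 0)
		return 0.0;
	if (timings.type != V4L2_DV_BT_656_1120)
		return 0.0;

	const struct v4l2_bt_timings *bt = &timings.bt;
	double frame_size = (double)V4L2_DV_BT_FRAME_WIDTH(bt) *
			    (double)V4L2_DV_BT_FRAME_HEIGHT(bt);

	return (double)bt->pixelclock / frame_size;
}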
+ */ +#define V4L2_DV_FL_HAS_CEA861_VIC (1 << 7) +/* + * If set, then the hdmi_vic field is valid and contains the Video + * Identification Code as per the HDMI standard (HDMI Vendor Specific + * InfoFrame). + */ +#define V4L2_DV_FL_HAS_HDMI_VIC (1 << 8) +/* + * CEA-861 specific: only valid for video receivers. + * If set, then HW can detect the difference between regular FPS and + * 1000/1001 FPS. Note: This flag is only valid for HDMI VIC codes with + * the V4L2_DV_FL_CAN_REDUCE_FPS flag set. + */ +#define V4L2_DV_FL_CAN_DETECT_REDUCED_FPS (1 << 9) + +/* A few useful defines to calculate the total blanking and frame sizes */ +#define V4L2_DV_BT_BLANKING_WIDTH(bt) \ + ((bt)->hfrontporch + (bt)->hsync + (bt)->hbackporch) +#define V4L2_DV_BT_FRAME_WIDTH(bt) \ + ((bt)->width + V4L2_DV_BT_BLANKING_WIDTH(bt)) +#define V4L2_DV_BT_BLANKING_HEIGHT(bt) \ + ((bt)->vfrontporch + (bt)->vsync + (bt)->vbackporch + \ + ((bt)->interlaced ? \ + ((bt)->il_vfrontporch + (bt)->il_vsync + (bt)->il_vbackporch) : 0)) +#define V4L2_DV_BT_FRAME_HEIGHT(bt) \ + ((bt)->height + V4L2_DV_BT_BLANKING_HEIGHT(bt)) + +/** struct v4l2_dv_timings - DV timings + * @type: the type of the timings + * @bt: BT656/1120 timings + */ +struct v4l2_dv_timings { + __u32 type; + union { + struct v4l2_bt_timings bt; + __u32 reserved[32]; + }; +} __attribute__ ((packed)); + +/* Values for the type field */ +#define V4L2_DV_BT_656_1120 0 /* BT.656/1120 timing type */ + + +/** struct v4l2_enum_dv_timings - DV timings enumeration + * @index: enumeration index + * @pad: the pad number for which to enumerate timings (used with + * v4l-subdev nodes only) + * @reserved: must be zeroed + * @timings: the timings for the given index + */ +struct v4l2_enum_dv_timings { + __u32 index; + __u32 pad; + __u32 reserved[2]; + struct v4l2_dv_timings timings; +}; + +/** struct v4l2_bt_timings_cap - BT.656/BT.1120 timing capabilities + * @min_width: width in pixels + * @max_width: width in pixels + * @min_height: height in lines + * @max_height: height in lines + * @min_pixelclock: Pixel clock in HZ. Ex. 74.25MHz->74250000 + * @max_pixelclock: Pixel clock in HZ. Ex. 
74.25MHz->74250000 + * @standards: Supported standards + * @capabilities: Supported capabilities + * @reserved: Must be zeroed + */ +struct v4l2_bt_timings_cap { + __u32 min_width; + __u32 max_width; + __u32 min_height; + __u32 max_height; + __u64 min_pixelclock; + __u64 max_pixelclock; + __u32 standards; + __u32 capabilities; + __u32 reserved[16]; +} __attribute__ ((packed)); + +/* Supports interlaced formats */ +#define V4L2_DV_BT_CAP_INTERLACED (1 << 0) +/* Supports progressive formats */ +#define V4L2_DV_BT_CAP_PROGRESSIVE (1 << 1) +/* Supports CVT/GTF reduced blanking */ +#define V4L2_DV_BT_CAP_REDUCED_BLANKING (1 << 2) +/* Supports custom formats */ +#define V4L2_DV_BT_CAP_CUSTOM (1 << 3) + +/** struct v4l2_dv_timings_cap - DV timings capabilities + * @type: the type of the timings (same as in struct v4l2_dv_timings) + * @pad: the pad number for which to query capabilities (used with + * v4l-subdev nodes only) + * @bt: the BT656/1120 timings capabilities + */ +struct v4l2_dv_timings_cap { + __u32 type; + __u32 pad; + __u32 reserved[2]; + union { + struct v4l2_bt_timings_cap bt; + __u32 raw_data[32]; + }; +}; + + +/* + * V I D E O I N P U T S + */ +struct v4l2_input { + __u32 index; /* Which input */ + __u8 name[32]; /* Label */ + __u32 type; /* Type of input */ + __u32 audioset; /* Associated audios (bitfield) */ + __u32 tuner; /* Tuner index */ + v4l2_std_id std; + __u32 status; + __u32 capabilities; + __u32 reserved[3]; +}; + +/* Values for the 'type' field */ +#define V4L2_INPUT_TYPE_TUNER 1 +#define V4L2_INPUT_TYPE_CAMERA 2 +#define V4L2_INPUT_TYPE_TOUCH 3 + +/* field 'status' - general */ +#define V4L2_IN_ST_NO_POWER 0x00000001 /* Attached device is off */ +#define V4L2_IN_ST_NO_SIGNAL 0x00000002 +#define V4L2_IN_ST_NO_COLOR 0x00000004 + +/* field 'status' - sensor orientation */ +/* If sensor is mounted upside down set both bits */ +#define V4L2_IN_ST_HFLIP 0x00000010 /* Frames are flipped horizontally */ +#define V4L2_IN_ST_VFLIP 0x00000020 /* Frames are flipped vertically */ + +/* field 'status' - analog */ +#define V4L2_IN_ST_NO_H_LOCK 0x00000100 /* No horizontal sync lock */ +#define V4L2_IN_ST_COLOR_KILL 0x00000200 /* Color killer is active */ +#define V4L2_IN_ST_NO_V_LOCK 0x00000400 /* No vertical sync lock */ +#define V4L2_IN_ST_NO_STD_LOCK 0x00000800 /* No standard format lock */ + +/* field 'status' - digital */ +#define V4L2_IN_ST_NO_SYNC 0x00010000 /* No synchronization lock */ +#define V4L2_IN_ST_NO_EQU 0x00020000 /* No equalizer lock */ +#define V4L2_IN_ST_NO_CARRIER 0x00040000 /* Carrier recovery failed */ + +/* field 'status' - VCR and set-top box */ +#define V4L2_IN_ST_MACROVISION 0x01000000 /* Macrovision detected */ +#define V4L2_IN_ST_NO_ACCESS 0x02000000 /* Conditional access denied */ +#define V4L2_IN_ST_VTR 0x04000000 /* VTR time constant */ + +/* capabilities flags */ +#define V4L2_IN_CAP_DV_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */ +#define V4L2_IN_CAP_CUSTOM_TIMINGS V4L2_IN_CAP_DV_TIMINGS /* For compatibility */ +#define V4L2_IN_CAP_STD 0x00000004 /* Supports S_STD */ +#define V4L2_IN_CAP_NATIVE_SIZE 0x00000008 /* Supports setting native size */ + +/* + * V I D E O O U T P U T S + */ +struct v4l2_output { + __u32 index; /* Which output */ + __u8 name[32]; /* Label */ + __u32 type; /* Type of output */ + __u32 audioset; /* Associated audios (bitfield) */ + __u32 modulator; /* Associated modulator */ + v4l2_std_id std; + __u32 capabilities; + __u32 reserved[3]; +}; +/* Values for the 'type' field */ +#define V4L2_OUTPUT_TYPE_MODULATOR 1 +#define 
V4L2_OUTPUT_TYPE_ANALOG 2 +#define V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY 3 + +/* capabilities flags */ +#define V4L2_OUT_CAP_DV_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */ +#define V4L2_OUT_CAP_CUSTOM_TIMINGS V4L2_OUT_CAP_DV_TIMINGS /* For compatibility */ +#define V4L2_OUT_CAP_STD 0x00000004 /* Supports S_STD */ +#define V4L2_OUT_CAP_NATIVE_SIZE 0x00000008 /* Supports setting native size */ + +/* + * C O N T R O L S + */ +struct v4l2_control { + __u32 id; + __s32 value; +}; + +struct v4l2_ext_control { + __u32 id; + __u32 size; + __u32 reserved2[1]; + union { + __s32 value; + __s64 value64; + char *string; + __u8 *p_u8; + __u16 *p_u16; + __u32 *p_u32; + __s32 *p_s32; + __s64 *p_s64; + struct v4l2_area *p_area; + struct v4l2_ctrl_h264_sps *p_h264_sps; + struct v4l2_ctrl_h264_pps *p_h264_pps; + struct v4l2_ctrl_h264_scaling_matrix *p_h264_scaling_matrix; + struct v4l2_ctrl_h264_pred_weights *p_h264_pred_weights; + struct v4l2_ctrl_h264_slice_params *p_h264_slice_params; + struct v4l2_ctrl_h264_decode_params *p_h264_decode_params; + struct v4l2_ctrl_fwht_params *p_fwht_params; + struct v4l2_ctrl_vp8_frame *p_vp8_frame; + struct v4l2_ctrl_mpeg2_sequence *p_mpeg2_sequence; + struct v4l2_ctrl_mpeg2_picture *p_mpeg2_picture; + struct v4l2_ctrl_mpeg2_quantisation *p_mpeg2_quantisation; + struct v4l2_ctrl_vp9_compressed_hdr *p_vp9_compressed_hdr_probs; + struct v4l2_ctrl_vp9_frame *p_vp9_frame; + struct v4l2_ctrl_hevc_sps *p_hevc_sps; + struct v4l2_ctrl_hevc_pps *p_hevc_pps; + struct v4l2_ctrl_hevc_slice_params *p_hevc_slice_params; + struct v4l2_ctrl_hevc_scaling_matrix *p_hevc_scaling_matrix; + struct v4l2_ctrl_hevc_decode_params *p_hevc_decode_params; + struct v4l2_ctrl_av1_sequence *p_av1_sequence; + struct v4l2_ctrl_av1_tile_group_entry *p_av1_tile_group_entry; + struct v4l2_ctrl_av1_frame *p_av1_frame; + struct v4l2_ctrl_av1_film_grain *p_av1_film_grain; + struct v4l2_ctrl_hdr10_cll_info *p_hdr10_cll_info; + struct v4l2_ctrl_hdr10_mastering_display *p_hdr10_mastering_display; + void *ptr; + } __attribute__ ((packed)); +} __attribute__ ((packed)); + +struct v4l2_ext_controls { + union { + __u32 ctrl_class; + __u32 which; + }; + __u32 count; + __u32 error_idx; + __s32 request_fd; + __u32 reserved[1]; + struct v4l2_ext_control *controls; +}; + +#define V4L2_CTRL_ID_MASK (0x0fffffff) +#define V4L2_CTRL_ID2CLASS(id) ((id) & 0x0fff0000UL) +#define V4L2_CTRL_ID2WHICH(id) ((id) & 0x0fff0000UL) +#define V4L2_CTRL_DRIVER_PRIV(id) (((id) & 0xffff) >= 0x1000) +#define V4L2_CTRL_MAX_DIMS (4) +#define V4L2_CTRL_WHICH_CUR_VAL 0 +#define V4L2_CTRL_WHICH_DEF_VAL 0x0f000000 +#define V4L2_CTRL_WHICH_REQUEST_VAL 0x0f010000 + +enum v4l2_ctrl_type { + V4L2_CTRL_TYPE_INTEGER = 1, + V4L2_CTRL_TYPE_BOOLEAN = 2, + V4L2_CTRL_TYPE_MENU = 3, + V4L2_CTRL_TYPE_BUTTON = 4, + V4L2_CTRL_TYPE_INTEGER64 = 5, + V4L2_CTRL_TYPE_CTRL_CLASS = 6, + V4L2_CTRL_TYPE_STRING = 7, + V4L2_CTRL_TYPE_BITMASK = 8, + V4L2_CTRL_TYPE_INTEGER_MENU = 9, + + /* Compound types are >= 0x0100 */ + V4L2_CTRL_COMPOUND_TYPES = 0x0100, + V4L2_CTRL_TYPE_U8 = 0x0100, + V4L2_CTRL_TYPE_U16 = 0x0101, + V4L2_CTRL_TYPE_U32 = 0x0102, + V4L2_CTRL_TYPE_AREA = 0x0106, + + V4L2_CTRL_TYPE_HDR10_CLL_INFO = 0x0110, + V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY = 0x0111, + + V4L2_CTRL_TYPE_H264_SPS = 0x0200, + V4L2_CTRL_TYPE_H264_PPS = 0x0201, + V4L2_CTRL_TYPE_H264_SCALING_MATRIX = 0x0202, + V4L2_CTRL_TYPE_H264_SLICE_PARAMS = 0x0203, + V4L2_CTRL_TYPE_H264_DECODE_PARAMS = 0x0204, + V4L2_CTRL_TYPE_H264_PRED_WEIGHTS = 0x0205, + + V4L2_CTRL_TYPE_FWHT_PARAMS = 0x0220, + + 
V4L2_CTRL_TYPE_VP8_FRAME = 0x0240, + + V4L2_CTRL_TYPE_MPEG2_QUANTISATION = 0x0250, + V4L2_CTRL_TYPE_MPEG2_SEQUENCE = 0x0251, + V4L2_CTRL_TYPE_MPEG2_PICTURE = 0x0252, + + V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR = 0x0260, + V4L2_CTRL_TYPE_VP9_FRAME = 0x0261, + + V4L2_CTRL_TYPE_HEVC_SPS = 0x0270, + V4L2_CTRL_TYPE_HEVC_PPS = 0x0271, + V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS = 0x0272, + V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX = 0x0273, + V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS = 0x0274, + + V4L2_CTRL_TYPE_AV1_SEQUENCE = 0x280, + V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY = 0x281, + V4L2_CTRL_TYPE_AV1_FRAME = 0x282, + V4L2_CTRL_TYPE_AV1_FILM_GRAIN = 0x283, +}; + +/* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */ +struct v4l2_queryctrl { + __u32 id; + __u32 type; /* enum v4l2_ctrl_type */ + __u8 name[32]; /* Whatever */ + __s32 minimum; /* Note signedness */ + __s32 maximum; + __s32 step; + __s32 default_value; + __u32 flags; + __u32 reserved[2]; +}; + +/* Used in the VIDIOC_QUERY_EXT_CTRL ioctl for querying extended controls */ +struct v4l2_query_ext_ctrl { + __u32 id; + __u32 type; + char name[32]; + __s64 minimum; + __s64 maximum; + __u64 step; + __s64 default_value; + __u32 flags; + __u32 elem_size; + __u32 elems; + __u32 nr_of_dims; + __u32 dims[V4L2_CTRL_MAX_DIMS]; + __u32 reserved[32]; +}; + +/* Used in the VIDIOC_QUERYMENU ioctl for querying menu items */ +struct v4l2_querymenu { + __u32 id; + __u32 index; + union { + __u8 name[32]; /* Whatever */ + __s64 value; + }; + __u32 reserved; +} __attribute__ ((packed)); + +/* Control flags */ +#define V4L2_CTRL_FLAG_DISABLED 0x0001 +#define V4L2_CTRL_FLAG_GRABBED 0x0002 +#define V4L2_CTRL_FLAG_READ_ONLY 0x0004 +#define V4L2_CTRL_FLAG_UPDATE 0x0008 +#define V4L2_CTRL_FLAG_INACTIVE 0x0010 +#define V4L2_CTRL_FLAG_SLIDER 0x0020 +#define V4L2_CTRL_FLAG_WRITE_ONLY 0x0040 +#define V4L2_CTRL_FLAG_VOLATILE 0x0080 +#define V4L2_CTRL_FLAG_HAS_PAYLOAD 0x0100 +#define V4L2_CTRL_FLAG_EXECUTE_ON_WRITE 0x0200 +#define V4L2_CTRL_FLAG_MODIFY_LAYOUT 0x0400 +#define V4L2_CTRL_FLAG_DYNAMIC_ARRAY 0x0800 + +/* Query flags, to be ORed with the control ID */ +#define V4L2_CTRL_FLAG_NEXT_CTRL 0x80000000 +#define V4L2_CTRL_FLAG_NEXT_COMPOUND 0x40000000 + +/* User-class control IDs defined by V4L2 */ +#define V4L2_CID_MAX_CTRLS 1024 +/* IDs reserved for driver specific controls */ +#define V4L2_CID_PRIVATE_BASE 0x08000000 + + +/* + * T U N I N G + */ +struct v4l2_tuner { + __u32 index; + __u8 name[32]; + __u32 type; /* enum v4l2_tuner_type */ + __u32 capability; + __u32 rangelow; + __u32 rangehigh; + __u32 rxsubchans; + __u32 audmode; + __s32 signal; + __s32 afc; + __u32 reserved[4]; +}; + +struct v4l2_modulator { + __u32 index; + __u8 name[32]; + __u32 capability; + __u32 rangelow; + __u32 rangehigh; + __u32 txsubchans; + __u32 type; /* enum v4l2_tuner_type */ + __u32 reserved[3]; +}; + +/* Flags for the 'capability' field */ +#define V4L2_TUNER_CAP_LOW 0x0001 +#define V4L2_TUNER_CAP_NORM 0x0002 +#define V4L2_TUNER_CAP_HWSEEK_BOUNDED 0x0004 +#define V4L2_TUNER_CAP_HWSEEK_WRAP 0x0008 +#define V4L2_TUNER_CAP_STEREO 0x0010 +#define V4L2_TUNER_CAP_LANG2 0x0020 +#define V4L2_TUNER_CAP_SAP 0x0020 +#define V4L2_TUNER_CAP_LANG1 0x0040 +#define V4L2_TUNER_CAP_RDS 0x0080 +#define V4L2_TUNER_CAP_RDS_BLOCK_IO 0x0100 +#define V4L2_TUNER_CAP_RDS_CONTROLS 0x0200 +#define V4L2_TUNER_CAP_FREQ_BANDS 0x0400 +#define V4L2_TUNER_CAP_HWSEEK_PROG_LIM 0x0800 +#define V4L2_TUNER_CAP_1HZ 0x1000 + +/* Flags for the 'rxsubchans' field */ +#define V4L2_TUNER_SUB_MONO 0x0001 +#define V4L2_TUNER_SUB_STEREO 0x0002 
+#define V4L2_TUNER_SUB_LANG2 0x0004 +#define V4L2_TUNER_SUB_SAP 0x0004 +#define V4L2_TUNER_SUB_LANG1 0x0008 +#define V4L2_TUNER_SUB_RDS 0x0010 + +/* Values for the 'audmode' field */ +#define V4L2_TUNER_MODE_MONO 0x0000 +#define V4L2_TUNER_MODE_STEREO 0x0001 +#define V4L2_TUNER_MODE_LANG2 0x0002 +#define V4L2_TUNER_MODE_SAP 0x0002 +#define V4L2_TUNER_MODE_LANG1 0x0003 +#define V4L2_TUNER_MODE_LANG1_LANG2 0x0004 + +struct v4l2_frequency { + __u32 tuner; + __u32 type; /* enum v4l2_tuner_type */ + __u32 frequency; + __u32 reserved[8]; +}; + +#define V4L2_BAND_MODULATION_VSB (1 << 1) +#define V4L2_BAND_MODULATION_FM (1 << 2) +#define V4L2_BAND_MODULATION_AM (1 << 3) + +struct v4l2_frequency_band { + __u32 tuner; + __u32 type; /* enum v4l2_tuner_type */ + __u32 index; + __u32 capability; + __u32 rangelow; + __u32 rangehigh; + __u32 modulation; + __u32 reserved[9]; +}; + +struct v4l2_hw_freq_seek { + __u32 tuner; + __u32 type; /* enum v4l2_tuner_type */ + __u32 seek_upward; + __u32 wrap_around; + __u32 spacing; + __u32 rangelow; + __u32 rangehigh; + __u32 reserved[5]; +}; + +/* + * R D S + */ + +struct v4l2_rds_data { + __u8 lsb; + __u8 msb; + __u8 block; +} __attribute__ ((packed)); + +#define V4L2_RDS_BLOCK_MSK 0x7 +#define V4L2_RDS_BLOCK_A 0 +#define V4L2_RDS_BLOCK_B 1 +#define V4L2_RDS_BLOCK_C 2 +#define V4L2_RDS_BLOCK_D 3 +#define V4L2_RDS_BLOCK_C_ALT 4 +#define V4L2_RDS_BLOCK_INVALID 7 + +#define V4L2_RDS_BLOCK_CORRECTED 0x40 +#define V4L2_RDS_BLOCK_ERROR 0x80 + +/* + * A U D I O + */ +struct v4l2_audio { + __u32 index; + __u8 name[32]; + __u32 capability; + __u32 mode; + __u32 reserved[2]; +}; + +/* Flags for the 'capability' field */ +#define V4L2_AUDCAP_STEREO 0x00001 +#define V4L2_AUDCAP_AVL 0x00002 + +/* Flags for the 'mode' field */ +#define V4L2_AUDMODE_AVL 0x00001 + +struct v4l2_audioout { + __u32 index; + __u8 name[32]; + __u32 capability; + __u32 mode; + __u32 reserved[2]; +}; + +/* + * M P E G S E R V I C E S + */ +#if 1 +#define V4L2_ENC_IDX_FRAME_I (0) +#define V4L2_ENC_IDX_FRAME_P (1) +#define V4L2_ENC_IDX_FRAME_B (2) +#define V4L2_ENC_IDX_FRAME_MASK (0xf) + +struct v4l2_enc_idx_entry { + __u64 offset; + __u64 pts; + __u32 length; + __u32 flags; + __u32 reserved[2]; +}; + +#define V4L2_ENC_IDX_ENTRIES (64) +struct v4l2_enc_idx { + __u32 entries; + __u32 entries_cap; + __u32 reserved[4]; + struct v4l2_enc_idx_entry entry[V4L2_ENC_IDX_ENTRIES]; +}; + + +#define V4L2_ENC_CMD_START (0) +#define V4L2_ENC_CMD_STOP (1) +#define V4L2_ENC_CMD_PAUSE (2) +#define V4L2_ENC_CMD_RESUME (3) + +/* Flags for V4L2_ENC_CMD_STOP */ +#define V4L2_ENC_CMD_STOP_AT_GOP_END (1 << 0) + +struct v4l2_encoder_cmd { + __u32 cmd; + __u32 flags; + union { + struct { + __u32 data[8]; + } raw; + }; +}; + +/* Decoder commands */ +#define V4L2_DEC_CMD_START (0) +#define V4L2_DEC_CMD_STOP (1) +#define V4L2_DEC_CMD_PAUSE (2) +#define V4L2_DEC_CMD_RESUME (3) +#define V4L2_DEC_CMD_FLUSH (4) + +/* Flags for V4L2_DEC_CMD_START */ +#define V4L2_DEC_CMD_START_MUTE_AUDIO (1 << 0) + +/* Flags for V4L2_DEC_CMD_PAUSE */ +#define V4L2_DEC_CMD_PAUSE_TO_BLACK (1 << 0) + +/* Flags for V4L2_DEC_CMD_STOP */ +#define V4L2_DEC_CMD_STOP_TO_BLACK (1 << 0) +#define V4L2_DEC_CMD_STOP_IMMEDIATELY (1 << 1) + +/* Play format requirements (returned by the driver): */ + +/* The decoder has no special format requirements */ +#define V4L2_DEC_START_FMT_NONE (0) +/* The decoder requires full GOPs */ +#define V4L2_DEC_START_FMT_GOP (1) + +/* The structure must be zeroed before use by the application + This ensures it can be extended safely in 
the future. */ +struct v4l2_decoder_cmd { + __u32 cmd; + __u32 flags; + union { + struct { + __u64 pts; + } stop; + + struct { + /* 0 or 1000 specifies normal speed, + 1 specifies forward single stepping, + -1 specifies backward single stepping, + >1: playback at speed/1000 of the normal speed, + <-1: reverse playback at (-speed/1000) of the normal speed. */ + __s32 speed; + __u32 format; + } start; + + struct { + __u32 data[16]; + } raw; + }; +}; +#endif + + +/* + * D A T A S E R V I C E S ( V B I ) + * + * Data services API by Michael Schimek + */ + +/* Raw VBI */ +struct v4l2_vbi_format { + __u32 sampling_rate; /* in 1 Hz */ + __u32 offset; + __u32 samples_per_line; + __u32 sample_format; /* V4L2_PIX_FMT_* */ + __s32 start[2]; + __u32 count[2]; + __u32 flags; /* V4L2_VBI_* */ + __u32 reserved[2]; /* must be zero */ +}; + +/* VBI flags */ +#define V4L2_VBI_UNSYNC (1 << 0) +#define V4L2_VBI_INTERLACED (1 << 1) + +/* ITU-R start lines for each field */ +#define V4L2_VBI_ITU_525_F1_START (1) +#define V4L2_VBI_ITU_525_F2_START (264) +#define V4L2_VBI_ITU_625_F1_START (1) +#define V4L2_VBI_ITU_625_F2_START (314) + +/* Sliced VBI + * + * This implements is a proposal V4L2 API to allow SLICED VBI + * required for some hardware encoders. It should change without + * notice in the definitive implementation. + */ + +struct v4l2_sliced_vbi_format { + __u16 service_set; + /* service_lines[0][...] specifies lines 0-23 (1-23 used) of the first field + service_lines[1][...] specifies lines 0-23 (1-23 used) of the second field + (equals frame lines 313-336 for 625 line video + standards, 263-286 for 525 line standards) */ + __u16 service_lines[2][24]; + __u32 io_size; + __u32 reserved[2]; /* must be zero */ +}; + +/* Teletext World System Teletext + (WST), defined on ITU-R BT.653-2 */ +#define V4L2_SLICED_TELETEXT_B (0x0001) +/* Video Program System, defined on ETS 300 231*/ +#define V4L2_SLICED_VPS (0x0400) +/* Closed Caption, defined on EIA-608 */ +#define V4L2_SLICED_CAPTION_525 (0x1000) +/* Wide Screen System, defined on ITU-R BT1119.1 */ +#define V4L2_SLICED_WSS_625 (0x4000) + +#define V4L2_SLICED_VBI_525 (V4L2_SLICED_CAPTION_525) +#define V4L2_SLICED_VBI_625 (V4L2_SLICED_TELETEXT_B | V4L2_SLICED_VPS | V4L2_SLICED_WSS_625) + +struct v4l2_sliced_vbi_cap { + __u16 service_set; + /* service_lines[0][...] specifies lines 0-23 (1-23 used) of the first field + service_lines[1][...] specifies lines 0-23 (1-23 used) of the second field + (equals frame lines 313-336 for 625 line video + standards, 263-286 for 525 line standards) */ + __u16 service_lines[2][24]; + __u32 type; /* enum v4l2_buf_type */ + __u32 reserved[3]; /* must be 0 */ +}; + +struct v4l2_sliced_vbi_data { + __u32 id; + __u32 field; /* 0: first field, 1: second field */ + __u32 line; /* 1-23 */ + __u32 reserved; /* must be 0 */ + __u8 data[48]; +}; + +/* + * Sliced VBI data inserted into MPEG Streams + */ + +/* + * V4L2_MPEG_STREAM_VBI_FMT_IVTV: + * + * Structure of payload contained in an MPEG 2 Private Stream 1 PES Packet in an + * MPEG-2 Program Pack that contains V4L2_MPEG_STREAM_VBI_FMT_IVTV Sliced VBI + * data + * + * Note, the MPEG-2 Program Pack and Private Stream 1 PES packet header + * definitions are not included here. See the MPEG-2 specifications for details + * on these headers. 
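The speed semantics above are easy to misread, so one concrete case: requesting playback at half speed means speed = 500, since values greater than 1 are interpreted as speed/1000 of normal. A sketch, assuming fd is an open decoder node that implements VIDIOC_DECODER_CMD (defined near the end of this header):

/* Sketch: start decoding at half of normal playback speed. */
static int start_half_speed(int fd)
{
	struct v4l2_decoder_cmd cmd = { 0 }; /* must be zeroed before use */

	cmd.cmd = V4L2_DEC_CMD_START;
	cmd.start.speed = 500; /* >1: play at speed/1000 of normal, so 0.5x */

	/* On return the driver reports its play format requirements in
	 * cmd.start.format (V4L2_DEC_START_FMT_NONE or _GOP). */
	return ioctl(fd, VIDIOC_DECODER_CMD, &cmd);
}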
+ */ + +/* Line type IDs */ +#define V4L2_MPEG_VBI_IVTV_TELETEXT_B (1) +#define V4L2_MPEG_VBI_IVTV_CAPTION_525 (4) +#define V4L2_MPEG_VBI_IVTV_WSS_625 (5) +#define V4L2_MPEG_VBI_IVTV_VPS (7) + +struct v4l2_mpeg_vbi_itv0_line { + __u8 id; /* One of V4L2_MPEG_VBI_IVTV_* above */ + __u8 data[42]; /* Sliced VBI data for the line */ +} __attribute__ ((packed)); + +struct v4l2_mpeg_vbi_itv0 { + __le32 linemask[2]; /* Bitmasks of VBI service lines present */ + struct v4l2_mpeg_vbi_itv0_line line[35]; +} __attribute__ ((packed)); + +struct v4l2_mpeg_vbi_ITV0 { + struct v4l2_mpeg_vbi_itv0_line line[36]; +} __attribute__ ((packed)); + +#define V4L2_MPEG_VBI_IVTV_MAGIC0 "itv0" +#define V4L2_MPEG_VBI_IVTV_MAGIC1 "ITV0" + +struct v4l2_mpeg_vbi_fmt_ivtv { + __u8 magic[4]; + union { + struct v4l2_mpeg_vbi_itv0 itv0; + struct v4l2_mpeg_vbi_ITV0 ITV0; + }; +} __attribute__ ((packed)); + +/* + * A G G R E G A T E S T R U C T U R E S + */ + +/** + * struct v4l2_plane_pix_format - additional, per-plane format definition + * @sizeimage: maximum size in bytes required for data, for which + * this plane will be used + * @bytesperline: distance in bytes between the leftmost pixels in two + * adjacent lines + * @reserved: drivers and applications must zero this array + */ +struct v4l2_plane_pix_format { + __u32 sizeimage; + __u32 bytesperline; + __u16 reserved[6]; +} __attribute__ ((packed)); + +/** + * struct v4l2_pix_format_mplane - multiplanar format definition + * @width: image width in pixels + * @height: image height in pixels + * @pixelformat: little endian four character code (fourcc) + * @field: enum v4l2_field; field order (for interlaced video) + * @colorspace: enum v4l2_colorspace; supplemental to pixelformat + * @plane_fmt: per-plane information + * @num_planes: number of planes for this format + * @flags: format flags (V4L2_PIX_FMT_FLAG_*) + * @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding + * @hsv_enc: enum v4l2_hsv_encoding, HSV encoding + * @quantization: enum v4l2_quantization, colorspace quantization + * @xfer_func: enum v4l2_xfer_func, colorspace transfer function + * @reserved: drivers and applications must zero this array + */ +struct v4l2_pix_format_mplane { + __u32 width; + __u32 height; + __u32 pixelformat; + __u32 field; + __u32 colorspace; + + struct v4l2_plane_pix_format plane_fmt[VIDEO_MAX_PLANES]; + __u8 num_planes; + __u8 flags; + union { + __u8 ycbcr_enc; + __u8 hsv_enc; + }; + __u8 quantization; + __u8 xfer_func; + __u8 reserved[7]; +} __attribute__ ((packed)); + +/** + * struct v4l2_sdr_format - SDR format definition + * @pixelformat: little endian four character code (fourcc) + * @buffersize: maximum size in bytes required for data + * @reserved: drivers and applications must zero this array + */ +struct v4l2_sdr_format { + __u32 pixelformat; + __u32 buffersize; + __u8 reserved[24]; +} __attribute__ ((packed)); + +/** + * struct v4l2_meta_format - metadata format definition + * @dataformat: little endian four character code (fourcc) + * @buffersize: maximum size in bytes required for data + * @width: number of data units of data per line (valid for line + * based formats only, see format documentation) + * @height: number of lines of data per buffer (valid for line based + * formats only) + * @bytesperline: offset between the beginnings of two adjacent lines in + * bytes (valid for line based formats only) + */ +struct v4l2_meta_format { + __u32 dataformat; + __u32 buffersize; + __u32 width; + __u32 height; + __u32 bytesperline; +} __attribute__ ((packed)); + +/** + * 
struct v4l2_format - stream data format + * @type: enum v4l2_buf_type; type of the data stream + * @fmt.pix: definition of an image format + * @fmt.pix_mp: definition of a multiplanar image format + * @fmt.win: definition of an overlaid image + * @fmt.vbi: raw VBI capture or output parameters + * @fmt.sliced: sliced VBI capture or output parameters + * @fmt.raw_data: placeholder for future extensions and custom formats + * @fmt: union of @pix, @pix_mp, @win, @vbi, @sliced, @sdr, + * @meta and @raw_data + */ +struct v4l2_format { + __u32 type; + union { + struct v4l2_pix_format pix; /* V4L2_BUF_TYPE_VIDEO_CAPTURE */ + struct v4l2_pix_format_mplane pix_mp; /* V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE */ + struct v4l2_window win; /* V4L2_BUF_TYPE_VIDEO_OVERLAY */ + struct v4l2_vbi_format vbi; /* V4L2_BUF_TYPE_VBI_CAPTURE */ + struct v4l2_sliced_vbi_format sliced; /* V4L2_BUF_TYPE_SLICED_VBI_CAPTURE */ + struct v4l2_sdr_format sdr; /* V4L2_BUF_TYPE_SDR_CAPTURE */ + struct v4l2_meta_format meta; /* V4L2_BUF_TYPE_META_CAPTURE */ + __u8 raw_data[200]; /* user-defined */ + } fmt; +}; + +/* Stream type-dependent parameters + */ +struct v4l2_streamparm { + __u32 type; /* enum v4l2_buf_type */ + union { + struct v4l2_captureparm capture; + struct v4l2_outputparm output; + __u8 raw_data[200]; /* user-defined */ + } parm; +}; + +/* + * E V E N T S + */ + +#define V4L2_EVENT_ALL 0 +#define V4L2_EVENT_VSYNC 1 +#define V4L2_EVENT_EOS 2 +#define V4L2_EVENT_CTRL 3 +#define V4L2_EVENT_FRAME_SYNC 4 +#define V4L2_EVENT_SOURCE_CHANGE 5 +#define V4L2_EVENT_MOTION_DET 6 +#define V4L2_EVENT_PRIVATE_START 0x08000000 + +/* Payload for V4L2_EVENT_VSYNC */ +struct v4l2_event_vsync { + /* Can be V4L2_FIELD_ANY, _NONE, _TOP or _BOTTOM */ + __u8 field; +} __attribute__ ((packed)); + +/* Payload for V4L2_EVENT_CTRL */ +#define V4L2_EVENT_CTRL_CH_VALUE (1 << 0) +#define V4L2_EVENT_CTRL_CH_FLAGS (1 << 1) +#define V4L2_EVENT_CTRL_CH_RANGE (1 << 2) +#define V4L2_EVENT_CTRL_CH_DIMENSIONS (1 << 3) + +struct v4l2_event_ctrl { + __u32 changes; + __u32 type; + union { + __s32 value; + __s64 value64; + }; + __u32 flags; + __s32 minimum; + __s32 maximum; + __s32 step; + __s32 default_value; +}; + +struct v4l2_event_frame_sync { + __u32 frame_sequence; +}; + +#define V4L2_EVENT_SRC_CH_RESOLUTION (1 << 0) + +struct v4l2_event_src_change { + __u32 changes; +}; + +#define V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ (1 << 0) + +/** + * struct v4l2_event_motion_det - motion detection event + * @flags: if V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ is set, then the + * frame_sequence field is valid. + * @frame_sequence: the frame sequence number associated with this event. + * @region_mask: which regions detected motion. + */ +struct v4l2_event_motion_det { + __u32 flags; + __u32 frame_sequence; + __u32 region_mask; +}; + +struct v4l2_event { + __u32 type; + union { + struct v4l2_event_vsync vsync; + struct v4l2_event_ctrl ctrl; + struct v4l2_event_frame_sync frame_sync; + struct v4l2_event_src_change src_change; + struct v4l2_event_motion_det motion_det; + __u8 data[64]; + } u; + __u32 pending; + __u32 sequence; + struct timespec timestamp; + __u32 id; + __u32 reserved[8]; +}; + +#define V4L2_EVENT_SUB_FL_SEND_INITIAL (1 << 0) +#define V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK (1 << 1) + +struct v4l2_event_subscription { + __u32 type; + __u32 id; + __u32 flags; + __u32 reserved[5]; +}; + +/* + * A D V A N C E D D E B U G G I N G + * + * NOTE: EXPERIMENTAL API, NEVER RELY ON THIS IN APPLICATIONS! + * FOR DEBUGGING, TESTING AND INTERNAL USE ONLY! 
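Stepping back to the event interface defined just above the debugging note: an application subscribes first, then dequeues events with VIDIOC_DQEVENT (which blocks unless the file descriptor is non-blocking) or polls for POLLPRI. A sketch, assuming fd is a device whose driver issues control events:

/* Sketch: subscribe to control change events and dequeue one. */
static int wait_for_ctrl_event(int fd, struct v4l2_event *ev)
{
	struct v4l2_event_subscription sub = { 0 };

	sub.type = V4L2_EVENT_CTRL;
	sub.id = 0; /* 0 subscribes to events from all controls */

	if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
		return -1;

	return ioctl(fd, VIDIOC_DQEVENT, ev);
}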
+ */
+
+/* VIDIOC_DBG_G_REGISTER and VIDIOC_DBG_S_REGISTER */
+
+#define V4L2_CHIP_MATCH_BRIDGE 0 /* Match against chip ID on the bridge (0 for the bridge) */
+#define V4L2_CHIP_MATCH_SUBDEV 4 /* Match against subdev index */
+
+/* The following four defines are no longer in use */
+#define V4L2_CHIP_MATCH_HOST V4L2_CHIP_MATCH_BRIDGE
+#define V4L2_CHIP_MATCH_I2C_DRIVER 1 /* Match against I2C driver name */
+#define V4L2_CHIP_MATCH_I2C_ADDR 2 /* Match against I2C 7-bit address */
+#define V4L2_CHIP_MATCH_AC97 3 /* Match against ancillary AC97 chip */
+
+struct v4l2_dbg_match {
+ __u32 type; /* Match type */
+ union { /* Match this chip, meaning determined by type */
+ __u32 addr;
+ char name[32];
+ };
+} __attribute__ ((packed));
+
+struct v4l2_dbg_register {
+ struct v4l2_dbg_match match;
+ __u32 size; /* register size in bytes */
+ __u64 reg;
+ __u64 val;
+} __attribute__ ((packed));
+
+#define V4L2_CHIP_FL_READABLE (1 << 0)
+#define V4L2_CHIP_FL_WRITABLE (1 << 1)
+
+/* VIDIOC_DBG_G_CHIP_INFO */
+struct v4l2_dbg_chip_info {
+ struct v4l2_dbg_match match;
+ char name[32];
+ __u32 flags;
+ __u32 reserved[32];
+} __attribute__ ((packed));
+
+/**
+ * struct v4l2_create_buffers - VIDIOC_CREATE_BUFS argument
+ * @index: on return, index of the first created buffer
+ * @count: entry: number of requested buffers,
+ * return: number of created buffers
+ * @memory: enum v4l2_memory; buffer memory type
+ * @format: frame format, for which buffers are requested
+ * @capabilities: capabilities of this buffer type.
+ * @flags: additional buffer management attributes (ignored unless the
+ * queue has V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS capability
+ * and configured for MMAP streaming I/O).
+ * @max_num_buffers: if the V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS capability
+ * flag is set, this field indicates the maximum possible number of buffers
+ * for this queue.
+ * @reserved: future extensions + */ +struct v4l2_create_buffers { + __u32 index; + __u32 count; + __u32 memory; + struct v4l2_format format; + __u32 capabilities; + __u32 flags; + __u32 max_num_buffers; + __u32 reserved[5]; +}; + +/** + * struct v4l2_remove_buffers - VIDIOC_REMOVE_BUFS argument + * @index: the first buffer to be removed + * @count: number of buffers to removed + * @type: enum v4l2_buf_type + * @reserved: future extensions + */ +struct v4l2_remove_buffers { + __u32 index; + __u32 count; + __u32 type; + __u32 reserved[13]; +}; + +/* + * I O C T L C O D E S F O R V I D E O D E V I C E S + * + */ +#define VIDIOC_QUERYCAP _IOR('V', 0, struct v4l2_capability) +#define VIDIOC_ENUM_FMT _IOWR('V', 2, struct v4l2_fmtdesc) +#define VIDIOC_G_FMT _IOWR('V', 4, struct v4l2_format) +#define VIDIOC_S_FMT _IOWR('V', 5, struct v4l2_format) +#define VIDIOC_REQBUFS _IOWR('V', 8, struct v4l2_requestbuffers) +#define VIDIOC_QUERYBUF _IOWR('V', 9, struct v4l2_buffer) +#define VIDIOC_G_FBUF _IOR('V', 10, struct v4l2_framebuffer) +#define VIDIOC_S_FBUF _IOW('V', 11, struct v4l2_framebuffer) +#define VIDIOC_OVERLAY _IOW('V', 14, int) +#define VIDIOC_QBUF _IOWR('V', 15, struct v4l2_buffer) +#define VIDIOC_EXPBUF _IOWR('V', 16, struct v4l2_exportbuffer) +#define VIDIOC_DQBUF _IOWR('V', 17, struct v4l2_buffer) +#define VIDIOC_STREAMON _IOW('V', 18, int) +#define VIDIOC_STREAMOFF _IOW('V', 19, int) +#define VIDIOC_G_PARM _IOWR('V', 21, struct v4l2_streamparm) +#define VIDIOC_S_PARM _IOWR('V', 22, struct v4l2_streamparm) +#define VIDIOC_G_STD _IOR('V', 23, v4l2_std_id) +#define VIDIOC_S_STD _IOW('V', 24, v4l2_std_id) +#define VIDIOC_ENUMSTD _IOWR('V', 25, struct v4l2_standard) +#define VIDIOC_ENUMINPUT _IOWR('V', 26, struct v4l2_input) +#define VIDIOC_G_CTRL _IOWR('V', 27, struct v4l2_control) +#define VIDIOC_S_CTRL _IOWR('V', 28, struct v4l2_control) +#define VIDIOC_G_TUNER _IOWR('V', 29, struct v4l2_tuner) +#define VIDIOC_S_TUNER _IOW('V', 30, struct v4l2_tuner) +#define VIDIOC_G_AUDIO _IOR('V', 33, struct v4l2_audio) +#define VIDIOC_S_AUDIO _IOW('V', 34, struct v4l2_audio) +#define VIDIOC_QUERYCTRL _IOWR('V', 36, struct v4l2_queryctrl) +#define VIDIOC_QUERYMENU _IOWR('V', 37, struct v4l2_querymenu) +#define VIDIOC_G_INPUT _IOR('V', 38, int) +#define VIDIOC_S_INPUT _IOWR('V', 39, int) +#define VIDIOC_G_EDID _IOWR('V', 40, struct v4l2_edid) +#define VIDIOC_S_EDID _IOWR('V', 41, struct v4l2_edid) +#define VIDIOC_G_OUTPUT _IOR('V', 46, int) +#define VIDIOC_S_OUTPUT _IOWR('V', 47, int) +#define VIDIOC_ENUMOUTPUT _IOWR('V', 48, struct v4l2_output) +#define VIDIOC_G_AUDOUT _IOR('V', 49, struct v4l2_audioout) +#define VIDIOC_S_AUDOUT _IOW('V', 50, struct v4l2_audioout) +#define VIDIOC_G_MODULATOR _IOWR('V', 54, struct v4l2_modulator) +#define VIDIOC_S_MODULATOR _IOW('V', 55, struct v4l2_modulator) +#define VIDIOC_G_FREQUENCY _IOWR('V', 56, struct v4l2_frequency) +#define VIDIOC_S_FREQUENCY _IOW('V', 57, struct v4l2_frequency) +#define VIDIOC_CROPCAP _IOWR('V', 58, struct v4l2_cropcap) +#define VIDIOC_G_CROP _IOWR('V', 59, struct v4l2_crop) +#define VIDIOC_S_CROP _IOW('V', 60, struct v4l2_crop) +#define VIDIOC_G_JPEGCOMP _IOR('V', 61, struct v4l2_jpegcompression) +#define VIDIOC_S_JPEGCOMP _IOW('V', 62, struct v4l2_jpegcompression) +#define VIDIOC_QUERYSTD _IOR('V', 63, v4l2_std_id) +#define VIDIOC_TRY_FMT _IOWR('V', 64, struct v4l2_format) +#define VIDIOC_ENUMAUDIO _IOWR('V', 65, struct v4l2_audio) +#define VIDIOC_ENUMAUDOUT _IOWR('V', 66, struct v4l2_audioout) +#define VIDIOC_G_PRIORITY _IOR('V', 67, 
__u32) /* enum v4l2_priority */ +#define VIDIOC_S_PRIORITY _IOW('V', 68, __u32) /* enum v4l2_priority */ +#define VIDIOC_G_SLICED_VBI_CAP _IOWR('V', 69, struct v4l2_sliced_vbi_cap) +#define VIDIOC_LOG_STATUS _IO('V', 70) +#define VIDIOC_G_EXT_CTRLS _IOWR('V', 71, struct v4l2_ext_controls) +#define VIDIOC_S_EXT_CTRLS _IOWR('V', 72, struct v4l2_ext_controls) +#define VIDIOC_TRY_EXT_CTRLS _IOWR('V', 73, struct v4l2_ext_controls) +#define VIDIOC_ENUM_FRAMESIZES _IOWR('V', 74, struct v4l2_frmsizeenum) +#define VIDIOC_ENUM_FRAMEINTERVALS _IOWR('V', 75, struct v4l2_frmivalenum) +#define VIDIOC_G_ENC_INDEX _IOR('V', 76, struct v4l2_enc_idx) +#define VIDIOC_ENCODER_CMD _IOWR('V', 77, struct v4l2_encoder_cmd) +#define VIDIOC_TRY_ENCODER_CMD _IOWR('V', 78, struct v4l2_encoder_cmd) + +/* + * Experimental, meant for debugging, testing and internal use. + * Only implemented if CONFIG_VIDEO_ADV_DEBUG is defined. + * You must be root to use these ioctls. Never use these in applications! + */ +#define VIDIOC_DBG_S_REGISTER _IOW('V', 79, struct v4l2_dbg_register) +#define VIDIOC_DBG_G_REGISTER _IOWR('V', 80, struct v4l2_dbg_register) + +#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek) +#define VIDIOC_S_DV_TIMINGS _IOWR('V', 87, struct v4l2_dv_timings) +#define VIDIOC_G_DV_TIMINGS _IOWR('V', 88, struct v4l2_dv_timings) +#define VIDIOC_DQEVENT _IOR('V', 89, struct v4l2_event) +#define VIDIOC_SUBSCRIBE_EVENT _IOW('V', 90, struct v4l2_event_subscription) +#define VIDIOC_UNSUBSCRIBE_EVENT _IOW('V', 91, struct v4l2_event_subscription) +#define VIDIOC_CREATE_BUFS _IOWR('V', 92, struct v4l2_create_buffers) +#define VIDIOC_PREPARE_BUF _IOWR('V', 93, struct v4l2_buffer) +#define VIDIOC_G_SELECTION _IOWR('V', 94, struct v4l2_selection) +#define VIDIOC_S_SELECTION _IOWR('V', 95, struct v4l2_selection) +#define VIDIOC_DECODER_CMD _IOWR('V', 96, struct v4l2_decoder_cmd) +#define VIDIOC_TRY_DECODER_CMD _IOWR('V', 97, struct v4l2_decoder_cmd) +#define VIDIOC_ENUM_DV_TIMINGS _IOWR('V', 98, struct v4l2_enum_dv_timings) +#define VIDIOC_QUERY_DV_TIMINGS _IOR('V', 99, struct v4l2_dv_timings) +#define VIDIOC_DV_TIMINGS_CAP _IOWR('V', 100, struct v4l2_dv_timings_cap) +#define VIDIOC_ENUM_FREQ_BANDS _IOWR('V', 101, struct v4l2_frequency_band) + +/* + * Experimental, meant for debugging, testing and internal use. + * Never use this in applications! + */ +#define VIDIOC_DBG_G_CHIP_INFO _IOWR('V', 102, struct v4l2_dbg_chip_info) + +#define VIDIOC_QUERY_EXT_CTRL _IOWR('V', 103, struct v4l2_query_ext_ctrl) +#define VIDIOC_REMOVE_BUFS _IOWR('V', 104, struct v4l2_remove_buffers) + + +/* Reminder: when adding new ioctls please add support for them to + drivers/media/v4l2-core/v4l2-compat-ioctl32.c as well! */ + +#define BASE_VIDIOC_PRIVATE 192 /* 192-255 are private */ + +/* Deprecated definitions kept for backwards compatibility */ +#define V4L2_PIX_FMT_HM12 V4L2_PIX_FMT_NV12_16L16 +#define V4L2_PIX_FMT_SUNXI_TILED_NV12 V4L2_PIX_FMT_NV12_32L32 +/* + * This capability was never implemented, anyone using this cap should drop it + * from their code. 
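As a last usage sketch for the ioctl list above: the V4L2_CTRL_FLAG_NEXT_CTRL query flag (defined earlier in this header) lets an application walk every control the device exposes without knowing any IDs in advance. fd is again an assumed open device node:

/* Sketch: enumerate all controls exposed by a device. */
static void list_controls(int fd)
{
	struct v4l2_queryctrl qc = { 0 };

	qc.id = V4L2_CTRL_FLAG_NEXT_CTRL;
	while (ioctl(fd, VIDIOC_QUERYCTRL, &qc) == 0) {
		if (!(qc.flags & V4L2_CTRL_FLAG_DISABLED))
			printf("0x%08x: %s\n", qc.id, (const char *)qc.name);
		qc.id |= V4L2_CTRL_FLAG_NEXT_CTRL;
	}
}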
+ */ +#define V4L2_CAP_ASYNCIO 0x02000000 + +#endif /* __LINUX_VIDEODEV2_H */ diff --git a/spider-cam/libcamera/include/meson.build b/spider-cam/libcamera/include/meson.build new file mode 100644 index 0000000..19b93a7 --- /dev/null +++ b/spider-cam/libcamera/include/meson.build @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: CC0-1.0 + +include_build_dir = meson.current_build_dir() + +subdir('android') +subdir('libcamera') diff --git a/spider-cam/libcamera/meson.build b/spider-cam/libcamera/meson.build new file mode 100644 index 0000000..2acd8c3 --- /dev/null +++ b/spider-cam/libcamera/meson.build @@ -0,0 +1,294 @@ +# SPDX-License-Identifier: CC0-1.0 + +project('libcamera', 'c', 'cpp', + meson_version : '>= 0.60', + version : '0.3.0', + default_options : [ + 'werror=true', + 'warning_level=2', + 'cpp_std=c++17', + ], + license : 'LGPL 2.1+') + +# Generate version information. The libcamera_git_version variable contains the +# full version with build metadata (patch count and SHA1, e.g. +# 1.2.3+211-c94a24f4), while the libcamera_version variable contains the +# major.minor.patch (e.g. 1.2.3) only. +# +# If the source tree matches the last git version tag, the build metadata +# (e.g. +211-c94a24f4) is omitted from libcamera_git_version. +libcamera_git_version = run_command('utils/gen-version.sh', + meson.project_build_root(), + meson.project_source_root(), + check : false).stdout().strip() + +# If the source tree isn't under git control, set libcamera_git_version to the +# meson project version. +if libcamera_git_version == '' + libcamera_git_version = meson.project_version() +endif + +libcamera_version = libcamera_git_version.split('+')[0] +project_version = meson.project_version().split('+')[0] + +# A shallow clone, or a clone without a reachable tag equivalent to the +# meson.project_version() could leave the project in a mis-described state. +# Produce a warning in this event, and fix to a best effort. +if libcamera_version != project_version + warning('The sources and meson.build disagree about the version: ' + + libcamera_version + ' != ' + project_version) + + summary({'libcamera git version' : libcamera_git_version, + 'Source version match' : false, + }, + bool_yn : true, section : 'Versions') + + # Re-run gen-version.sh to replace the git version (major.minor.patch) with + # the meson project version. The build metadata provided by git are kept. + libcamera_git_version = run_command('utils/gen-version.sh', + meson.project_build_root(), + meson.project_source_root(), + project_version, + check : false).stdout().strip() + libcamera_version = project_version + + # Append a marker to show we have modified this version string. + libcamera_git_version += '-nvm' +endif + +# The major and minor libcamera version components are used as the soname. +# No ABI/API compatibility is guaranteed between releases (x.y). +# +# When we declare a stable ABI/API we will provide a 1.0 release and the +# soversion at that point will be the 'major' release value (x). +semver = libcamera_version.split('.') +libcamera_soversion = semver[0] + '.' + semver[1] + +summary({ 'Sources': libcamera_git_version, }, section : 'Versions') + +# This script generates the .tarball-version file on a 'meson dist' command. +meson.add_dist_script('utils/run-dist.sh') + +# Configure the build environment. 
+cc = meson.get_compiler('c') +cxx = meson.get_compiler('cpp') +config_h = configuration_data() + +if cc.has_header_symbol('fcntl.h', 'F_ADD_SEALS', prefix : '#define _GNU_SOURCE') + config_h.set('HAVE_FILE_SEALS', 1) +endif + +if cc.has_header_symbol('unistd.h', 'issetugid') + config_h.set('HAVE_ISSETUGID', 1) +endif + +if cc.has_header_symbol('locale.h', 'locale_t', prefix : '#define _GNU_SOURCE') + config_h.set('HAVE_LOCALE_T', 1) +endif + +if cc.has_header_symbol('sys/mman.h', 'memfd_create', prefix : '#define _GNU_SOURCE') + config_h.set('HAVE_MEMFD_CREATE', 1) +endif + +if cc.has_header_symbol('stdlib.h', 'secure_getenv', prefix : '#define _GNU_SOURCE') + config_h.set('HAVE_SECURE_GETENV', 1) +endif + +common_arguments = [ + '-Wmissing-declarations', + '-Wshadow', + '-include', meson.current_build_dir() / 'config.h', +] + +c_arguments = [] +cpp_arguments = [] + +if cc.get_id() == 'clang' + if cc.version().version_compare('<9') + error('clang version is too old, libcamera requires 9.0 or newer') + endif + + # Turn _FORTIFY_SOURCE by default on. This is needed on clang only as gcc + # enables it by default. FORTIFY will not work properly with `-O0`, and may + # result in macro redefinition errors if the user already has a setting for + # `-D_FORTIFY_SOURCE`. Do not enable FORTIFY in either of those cases. + if get_option('optimization') != '0' + fortify = cc.get_define('_FORTIFY_SOURCE') + if fortify == '' + message('Adding _FORTIFY_SOURCE') + common_arguments += [ + '-D_FORTIFY_SOURCE=2', + ] + endif + endif + + # Use libc++ by default if available instead of libstdc++ when compiling + # with clang. + if cc.find_library('c++', required : false).found() + cpp_arguments += [ + '-stdlib=libc++', + ] + endif + + cpp_arguments += [ + '-Wextra-semi', + '-Wthread-safety', + ] +endif + +if cc.get_id() == 'gcc' + if cc.version().version_compare('<8') + error('gcc version is too old, libcamera requires 8.0 or newer') + endif + + # On gcc 8, the file system library is provided in a separate static + # library. + if cc.version().version_compare('<9') + cpp_arguments += [ + '-lstdc++fs', + ] + endif + + # gcc 13 implements the C++23 version of automatic move from local + # variables in return statements (see + # https://en.cppreference.com/w/cpp/language/return). As a result, some + # previously required explicit std::move() in return statements generate + # warnings. Those moves can't be removed as older compiler versions could + # use copy constructors instead of move constructors. The easiest fix is to + # disable the warning. With -Wpessimizing-move enabled, the compiler will + # still warn of pessimizing moves, only the redundant but not pessimizing + # moves will be ignored. + if cc.version().version_compare('>=13') + cpp_arguments += [ + '-Wno-redundant-move', + ] + endif + + # gcc 7.1 introduced processor-specific ABI breakages related to parameter + # passing on ARM platforms. This generates a large number of messages + # during compilation. Silence them. + if host_machine.cpu_family() == 'arm' + cpp_arguments += [ + '-Wno-psabi', + ] + endif +endif + +# We use C99 designated initializers for arrays as C++ has no equivalent +# feature. Both gcc and clang support this extension, but recent +# versions of clang generate a warning that needs to be disabled. 
+if cc.has_argument('-Wno-c99-designator') + common_arguments += [ + '-Wno-c99-designator', + ] +endif + +c_arguments += common_arguments +cpp_arguments += common_arguments + +add_project_arguments(c_arguments, language : 'c') +add_project_arguments(cpp_arguments, language : 'cpp') +add_project_link_arguments(cpp_arguments, language : 'cpp') + +libcamera_includes = include_directories('include') + +# Sub-directories fill py_modules with their dependencies. +py_modules = [] + +# Libraries used by multiple components +liblttng = dependency('lttng-ust', required : get_option('tracing')) + +# Pipeline handlers +# +pipelines = get_option('pipelines') + +arch_arm = ['arm', 'aarch64'] +arch_x86 = ['x86', 'x86_64'] +pipelines_support = { + 'imx8-isi': arch_arm, + 'ipu3': arch_x86, + 'mali-c55': arch_arm, + 'rkisp1': arch_arm, + 'rpi/vc4': arch_arm, + 'simple': arch_arm, + 'uvcvideo': ['any'], + 'vimc': ['test'], +} + +if pipelines.contains('all') + pipelines = pipelines_support.keys() +elif pipelines.contains('auto') + host_cpu = host_machine.cpu_family() + pipelines = [] + foreach pipeline, archs : pipelines_support + if host_cpu in archs or 'any' in archs + pipelines += pipeline + endif + endforeach +endif + +# Tests require the vimc pipeline handler, include it automatically when tests +# are enabled. +if get_option('test') + foreach pipeline, archs : pipelines_support + if 'test' in archs and pipeline not in pipelines + message('Enabling ' + pipeline + ' pipeline handler for tests') + pipelines += pipeline + endif + endforeach +endif + +# Utilities are parsed first to provide support for other components. +subdir('utils') + +subdir('include') +subdir('src') + +# The documentation and test components are optional and can be disabled +# through configuration values. They are enabled by default. + +subdir('Documentation') +subdir('test') + +if not meson.is_cross_build() + kernel_version_req = '>= 5.0.0' + kernel_version = run_command('uname', '-r', check : true).stdout().strip() + if not kernel_version.version_compare(kernel_version_req) + warning('The current running kernel version @0@ is too old to run libcamera.' + .format(kernel_version)) + warning('If you intend to use libcamera on this machine, please upgrade to a kernel @0@.' + .format(kernel_version_req)) + endif +endif + +# Create a symlink from the build root to the source root. This is used when +# running libcamera from the build directory to locate resources in the source +# directory (such as IPA configuration files). +run_command('ln', '-fsT', meson.project_source_root(), meson.project_build_root() / 'source', + check : true) + +configure_file(output : 'config.h', configuration : config_h) + +# Check for python installation and modules. 
+py_mod = import('python') +py_mod.find_installation('python3', modules : py_modules) + +## Summarise Configurations +summary({ + 'Enabled pipelines': pipelines, + 'Enabled IPA modules': enabled_ipa_names, + 'Controls files': controls_files, + 'Properties files': properties_files, + 'Hotplug support': libudev.found(), + 'Tracing support': tracing_enabled, + 'Android support': android_enabled, + 'GStreamer support': gst_enabled, + 'Python bindings': pycamera_enabled, + 'V4L2 emulation support': v4l2_enabled, + 'cam application': cam_enabled, + 'qcam application': qcam_enabled, + 'lc-compliance application': lc_compliance_enabled, + 'Unit tests': test_enabled, + }, + section : 'Configuration', + bool_yn : true) diff --git a/spider-cam/libcamera/meson_options.txt b/spider-cam/libcamera/meson_options.txt new file mode 100644 index 0000000..7aa4124 --- /dev/null +++ b/spider-cam/libcamera/meson_options.txt @@ -0,0 +1,88 @@ +# SPDX-License-Identifier: CC0-1.0 + +option('android', + type : 'feature', + value : 'disabled', + description : 'Compile libcamera with Android Camera3 HAL interface') + +option('android_platform', + type : 'combo', + choices : ['cros', 'generic'], + value : 'generic', + description : 'Select the Android platform to compile for') + +option('cam', + type : 'feature', + value : 'auto', + description : 'Compile the cam test application') + +option('documentation', + type : 'feature', + description : 'Generate the project documentation') + +option('doc_werror', + type : 'boolean', + value : false, + description : 'Treat documentation warnings as errors') + +option('gstreamer', + type : 'feature', + value : 'auto', + description : 'Compile libcamera GStreamer plugin') + +option('ipas', + type : 'array', + choices : ['ipu3', 'rkisp1', 'rpi/vc4', 'simple', 'vimc'], + description : 'Select which IPA modules to build') + +option('lc-compliance', + type : 'feature', + value : 'auto', + description : 'Compile the lc-compliance test application') + +option('pipelines', + type : 'array', + value : ['auto'], + choices : [ + 'all', + 'auto', + 'imx8-isi', + 'ipu3', + 'mali-c55', + 'rkisp1', + 'rpi/vc4', + 'simple', + 'uvcvideo', + 'vimc' + ], + description : 'Select which pipeline handlers to build. If this is set to "auto", all the pipelines applicable to the target architecture will be built. If this is set to "all", all the pipelines will be built. If both are selected then "all" will take precedence.') + +option('pycamera', + type : 'feature', + value : 'auto', + description : 'Enable libcamera Python bindings (experimental)') + +option('qcam', + type : 'feature', + value : 'auto', + description : 'Compile the qcam test application') + +option('test', + type : 'boolean', + value : false, + description : 'Compile and include the tests') + +option('tracing', + type : 'feature', + value : 'auto', + description : 'Enable tracing (based on lttng)') + +option('udev', + type : 'feature', + value : 'auto', + description : 'Enable udev support for hotplug') + +option('v4l2', + type : 'boolean', + value : false, + description : 'Compile the V4L2 compatibility layer') diff --git a/spider-cam/libcamera/package/gentoo/media-libs/libcamera/libcamera-9999.ebuild b/spider-cam/libcamera/package/gentoo/media-libs/libcamera/libcamera-9999.ebuild new file mode 100644 index 0000000..65619e0 --- /dev/null +++ b/spider-cam/libcamera/package/gentoo/media-libs/libcamera/libcamera-9999.ebuild @@ -0,0 +1,45 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright 2019 Google Inc. 
+ +EAPI=6 +PYTHON_COMPAT=( python3_{7..10} ) + +inherit git-r3 meson python-any-r1 + +DESCRIPTION="Camera support library for Linux" +HOMEPAGE="http://libcamera.org" +EGIT_REPO_URI="https://git.libcamera.org/libcamera/libcamera.git" +EGIT_BRANCH="master" + +LICENSE="LGPL-2.1+" +SLOT="0" +KEYWORDS="*" +IUSE="debug doc test udev" + +RDEPEND=" + >=net-libs/gnutls-3.3:= + udev? ( virtual/libudev ) +" + +DEPEND=" + ${RDEPEND} + dev-libs/openssl + $(python_gen_any_dep 'dev-python/pyyaml[${PYTHON_USEDEP}]') +" + +src_configure() { + local emesonargs=( + $(meson_feature doc documentation) + $(meson_use test) + --buildtype $(usex debug debug plain) + ) + meson_src_configure +} + +src_compile() { + meson_src_compile +} + +src_install() { + meson_src_install +} diff --git a/spider-cam/libcamera/src/android/camera3_hal.cpp b/spider-cam/libcamera/src/android/camera3_hal.cpp new file mode 100644 index 0000000..a5ad237 --- /dev/null +++ b/spider-cam/libcamera/src/android/camera3_hal.cpp @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * Android Camera HALv3 module + */ + +#include + +#include + +#include "camera_device.h" +#include "camera_hal_manager.h" + +using namespace libcamera; + +LOG_DEFINE_CATEGORY(HAL) + +/*------------------------------------------------------------------------------ + * Android Camera HAL callbacks + */ + +static int hal_get_number_of_cameras() +{ + return CameraHalManager::instance()->numCameras(); +} + +static int hal_get_camera_info(int id, struct camera_info *info) +{ + return CameraHalManager::instance()->getCameraInfo(id, info); +} + +static int hal_set_callbacks(const camera_module_callbacks_t *callbacks) +{ + CameraHalManager::instance()->setCallbacks(callbacks); + + return 0; +} + +static int hal_open_legacy([[maybe_unused]] const struct hw_module_t *module, + [[maybe_unused]] const char *id, + [[maybe_unused]] uint32_t halVersion, + [[maybe_unused]] struct hw_device_t **device) +{ + return -ENOSYS; +} + +static int hal_set_torch_mode([[maybe_unused]] const char *camera_id, + [[maybe_unused]] bool enabled) +{ + return -ENOSYS; +} + +/* + * First entry point of the camera HAL module. + * + * Initialize the HAL but does not open any camera device yet (see hal_dev_open) + */ +static int hal_init() +{ + LOG(HAL, Info) << "Initialising Android camera HAL"; + + CameraHalManager::instance()->init(); + + return 0; +} + +/*------------------------------------------------------------------------------ + * Android Camera Device + */ + +static int hal_dev_open(const hw_module_t *module, const char *name, + hw_device_t **device) +{ + LOG(HAL, Debug) << "Open camera " << name; + + int id = atoi(name); + + auto [camera, ret] = CameraHalManager::instance()->open(id, module); + if (!camera) { + LOG(HAL, Error) + << "Failed to open camera module '" << id << "'"; + return ret == -EBUSY ? 
-EUSERS : ret; + } + + *device = &camera->camera3Device()->common; + + return 0; +} + +static struct hw_module_methods_t hal_module_methods = { + .open = hal_dev_open, +}; + +camera_module_t HAL_MODULE_INFO_SYM = { + .common = { + .tag = HARDWARE_MODULE_TAG, + .module_api_version = CAMERA_MODULE_API_VERSION_2_4, + .hal_api_version = HARDWARE_HAL_API_VERSION, + .id = CAMERA_HARDWARE_MODULE_ID, + .name = "libcamera camera HALv3 module", + .author = "libcamera", + .methods = &hal_module_methods, + .dso = nullptr, + .reserved = {}, + }, + + .get_number_of_cameras = hal_get_number_of_cameras, + .get_camera_info = hal_get_camera_info, + .set_callbacks = hal_set_callbacks, + .get_vendor_tag_ops = nullptr, + .open_legacy = hal_open_legacy, + .set_torch_mode = hal_set_torch_mode, + .init = hal_init, + .reserved = {}, +}; diff --git a/spider-cam/libcamera/src/android/camera_buffer.h b/spider-cam/libcamera/src/android/camera_buffer.h new file mode 100644 index 0000000..9666996 --- /dev/null +++ b/spider-cam/libcamera/src/android/camera_buffer.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2021, Google Inc. + * + * Frame buffer handling interface definition + */ + +#pragma once + +#include + +#include +#include +#include +#include + +class CameraBuffer final : public libcamera::Extensible +{ + LIBCAMERA_DECLARE_PRIVATE() + +public: + CameraBuffer(buffer_handle_t camera3Buffer, + libcamera::PixelFormat pixelFormat, + const libcamera::Size &size, int flags); + ~CameraBuffer(); + + bool isValid() const; + + unsigned int numPlanes() const; + + libcamera::Span plane(unsigned int plane) const; + libcamera::Span plane(unsigned int plane); + + unsigned int stride(unsigned int plane) const; + unsigned int offset(unsigned int plane) const; + unsigned int size(unsigned int plane) const; + + size_t jpegBufferSize(size_t maxJpegBufferSize) const; +}; + +#define PUBLIC_CAMERA_BUFFER_IMPLEMENTATION \ +CameraBuffer::CameraBuffer(buffer_handle_t camera3Buffer, \ + libcamera::PixelFormat pixelFormat, \ + const libcamera::Size &size, int flags) \ + : Extensible(std::make_unique(this, camera3Buffer, \ + pixelFormat, size, \ + flags)) \ +{ \ +} \ +CameraBuffer::~CameraBuffer() \ +{ \ +} \ +bool CameraBuffer::isValid() const \ +{ \ + return _d()->isValid(); \ +} \ +unsigned int CameraBuffer::numPlanes() const \ +{ \ + return _d()->numPlanes(); \ +} \ +Span CameraBuffer::plane(unsigned int plane) const \ +{ \ + return const_cast(_d())->plane(plane); \ +} \ +Span CameraBuffer::plane(unsigned int plane) \ +{ \ + return _d()->plane(plane); \ +} \ +unsigned int CameraBuffer::stride(unsigned int plane) const \ +{ \ + return _d()->stride(plane); \ +} \ +unsigned int CameraBuffer::offset(unsigned int plane) const \ +{ \ + return _d()->offset(plane); \ +} \ +unsigned int CameraBuffer::size(unsigned int plane) const \ +{ \ + return _d()->size(plane); \ +} \ +size_t CameraBuffer::jpegBufferSize(size_t maxJpegBufferSize) const \ +{ \ + return _d()->jpegBufferSize(maxJpegBufferSize); \ +} diff --git a/spider-cam/libcamera/src/android/camera_capabilities.cpp b/spider-cam/libcamera/src/android/camera_capabilities.cpp new file mode 100644 index 0000000..71043e1 --- /dev/null +++ b/spider-cam/libcamera/src/android/camera_capabilities.cpp @@ -0,0 +1,1608 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2021, Google Inc. 
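
The CameraBuffer facade above is libcamera's Extensible (PIMPL) pattern: the header fixes the public interface, each memory backend supplies its own CameraBuffer::Private, and PUBLIC_CAMERA_BUFFER_IMPLEMENTATION stamps out the one-line forwarders to _d(). A self-contained toy showing just that forwarding shape (plain std::unique_ptr instead of libcamera's Extensible machinery, and invented names throughout):

        #include <cstdio>
        #include <memory>

        class Buffer
        {
        public:
                Buffer(unsigned int planes);
                ~Buffer();
                unsigned int numPlanes() const;

        private:
                class Private;                  /* supplied by the "backend" */
                std::unique_ptr<Private> d_;
        };

        /* A backend translation unit defines Private and the forwarders. */
        class Buffer::Private
        {
        public:
                Private(unsigned int planes) : planes_(planes) {}
                unsigned int numPlanes() const { return planes_; }

        private:
                unsigned int planes_;
        };

        Buffer::Buffer(unsigned int planes)
                : d_(std::make_unique<Private>(planes)) {}
        Buffer::~Buffer() = default;
        unsigned int Buffer::numPlanes() const { return d_->numPlanes(); }

        int main()
        {
                Buffer buf(2);
                std::printf("planes: %u\n", buf.numPlanes());
        }

Note the destructor is defined only after Private is complete; that is why the macro above expands ~CameraBuffer() out of line as well.
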
+ * + * Camera static properties manager + */ + +#include "camera_capabilities.h" + +#include +#include +#include +#include +#include + +#include + +#include + +#include +#include +#include + +#include "libcamera/internal/formats.h" + +using namespace libcamera; + +LOG_DECLARE_CATEGORY(HAL) + +namespace { + +/* + * \var camera3Resolutions + * \brief The list of image resolutions commonly supported by Android + * + * The following are defined as mandatory to be supported by the Android + * Camera3 specification: (320x240), (640x480), (1280x720), (1920x1080). + * + * The following 4:3 resolutions are defined as optional, but commonly + * supported by Android devices: (1280x960), (1600x1200). + */ +const std::vector camera3Resolutions = { + { 320, 240 }, + { 640, 480 }, + { 1280, 720 }, + { 1280, 960 }, + { 1600, 1200 }, + { 1920, 1080 } +}; + +/* + * \struct Camera3Format + * \brief Data associated with an Android format identifier + * \var libcameraFormats List of libcamera pixel formats compatible with the + * Android format + * \var name The human-readable representation of the Android format code + */ +struct Camera3Format { + std::vector libcameraFormats; + bool mandatory; + const char *name; +}; + +/* + * \var camera3FormatsMap + * \brief Associate Android format code with ancillary data + */ +const std::map camera3FormatsMap = { + { + HAL_PIXEL_FORMAT_BLOB, { + { formats::MJPEG }, + true, + "BLOB" + } + }, { + HAL_PIXEL_FORMAT_YCbCr_420_888, { + { formats::NV12, formats::NV21 }, + true, + "YCbCr_420_888" + } + }, { + /* + * \todo Translate IMPLEMENTATION_DEFINED inspecting the gralloc + * usage flag. For now, copy the YCbCr_420 configuration. + */ + HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, { + { formats::NV12, formats::NV21 }, + true, + "IMPLEMENTATION_DEFINED" + } + }, { + HAL_PIXEL_FORMAT_RAW10, { + { + formats::SBGGR10_CSI2P, + formats::SGBRG10_CSI2P, + formats::SGRBG10_CSI2P, + formats::SRGGB10_CSI2P + }, + false, + "RAW10" + } + }, { + HAL_PIXEL_FORMAT_RAW12, { + { + formats::SBGGR12_CSI2P, + formats::SGBRG12_CSI2P, + formats::SGRBG12_CSI2P, + formats::SRGGB12_CSI2P + }, + false, + "RAW12" + } + }, { + HAL_PIXEL_FORMAT_RAW16, { + { + formats::SBGGR16, + formats::SGBRG16, + formats::SGRBG16, + formats::SRGGB16 + }, + false, + "RAW16" + } + }, +}; + +const std::map +hwLevelStrings = { + { ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED, "LIMITED" }, + { ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_FULL, "FULL" }, + { ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LEGACY, "LEGACY" }, + { ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_3, "LEVEL_3" }, + { ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_EXTERNAL, "EXTERNAL" }, +}; + +enum class ControlRange { + Min, + Def, + Max, +}; + +/** + * \brief Set Android metadata from libcamera ControlInfo or a default value + * \tparam T Type of the control in libcamera + * \tparam U Type of the metadata in Android + * \param[in] metadata Android metadata pack to add the control value to + * \param[in] tag Android metadata tag + * \param[in] controlsInfo libcamera ControlInfoMap from which to find the control info + * \param[in] control libcamera ControlId to find from \a controlsInfo + * \param[in] controlRange Whether to use the min, def, or max value from the control info + * \param[in] defaultValue The value to set in \a metadata if \a control is not found + * + * Set the Android metadata entry in \a metadata with tag \a tag based on the + * control info found for the libcamera control \a control in the libcamera + * ControlInfoMap \a controlsInfo. 
If no libcamera ControlInfo is found, then + * the Android metadata entry is set to \a defaultValue. + * + * This function is for scalar values. + */ +template +U setMetadata(CameraMetadata *metadata, uint32_t tag, + const ControlInfoMap &controlsInfo, const Control &control, + enum ControlRange controlRange, const U defaultValue) +{ + U value = defaultValue; + + const auto &info = controlsInfo.find(&control); + if (info != controlsInfo.end()) { + switch (controlRange) { + case ControlRange::Min: + value = static_cast(info->second.min().template get()); + break; + case ControlRange::Def: + value = static_cast(info->second.def().template get()); + break; + case ControlRange::Max: + value = static_cast(info->second.max().template get()); + break; + } + } + + metadata->addEntry(tag, value); + return value; +} + +/** + * \brief Set Android metadata from libcamera ControlInfo or a default value + * \tparam T Type of the control in libcamera + * \tparam U Type of the metadata in Android + * \param[in] metadata Android metadata pack to add the control value to + * \param[in] tag Android metadata tag + * \param[in] controlsInfo libcamera ControlInfoMap from which to find the control info + * \param[in] control libcamera ControlId to find from \a controlsInfo + * \param[in] defaultVector The value to set in \a metadata if \a control is not found + * + * Set the Android metadata entry in \a metadata with tag \a tag based on the + * control info found for the libcamera control \a control in the libcamera + * ControlInfoMap \a controlsInfo. If no libcamera ControlInfo is found, then + * the Android metadata entry is set to \a defaultVector. + * + * This function is for vector values. + */ +template +std::vector setMetadata(CameraMetadata *metadata, uint32_t tag, + const ControlInfoMap &controlsInfo, + const Control &control, + const std::vector &defaultVector) +{ + const auto &info = controlsInfo.find(&control); + if (info == controlsInfo.end()) { + metadata->addEntry(tag, defaultVector); + return defaultVector; + } + + std::vector values(info->second.values().size()); + for (const auto &value : info->second.values()) + values.push_back(static_cast(value.template get())); + metadata->addEntry(tag, values); + + return values; +} + +} /* namespace */ + +bool CameraCapabilities::validateManualSensorCapability() +{ + const char *noMode = "Manual sensor capability unavailable: "; + + if (!staticMetadata_->entryContains(ANDROID_CONTROL_AE_AVAILABLE_MODES, + ANDROID_CONTROL_AE_MODE_OFF)) { + LOG(HAL, Info) << noMode << "missing AE mode off"; + return false; + } + + if (!staticMetadata_->entryContains(ANDROID_CONTROL_AE_LOCK_AVAILABLE, + ANDROID_CONTROL_AE_LOCK_AVAILABLE_TRUE)) { + LOG(HAL, Info) << noMode << "missing AE lock"; + return false; + } + + /* + * \todo Return true here after we satisfy all the requirements: + * https://developer.android.com/reference/android/hardware/camera2/CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR + * Manual frame duration control + * android.sensor.frameDuration + * android.sensor.info.maxFrameDuration + * Manual exposure control + * android.sensor.exposureTime + * android.sensor.info.exposureTimeRange + * Manual sensitivity control + * android.sensor.sensitivity + * android.sensor.info.sensitivityRange + * Manual lens control (if the lens is adjustable) + * android.lens.* + * Manual flash control (if a flash unit is present) + * android.flash.* + * Manual black level locking + * android.blackLevel.lock + * Auto exposure lock + * android.control.aeLock + */ + 
return false; +} + +bool CameraCapabilities::validateManualPostProcessingCapability() +{ + const char *noMode = "Manual post processing capability unavailable: "; + + if (!staticMetadata_->entryContains(ANDROID_CONTROL_AWB_AVAILABLE_MODES, + ANDROID_CONTROL_AWB_MODE_OFF)) { + LOG(HAL, Info) << noMode << "missing AWB mode off"; + return false; + } + + if (!staticMetadata_->entryContains(ANDROID_CONTROL_AWB_LOCK_AVAILABLE, + ANDROID_CONTROL_AWB_LOCK_AVAILABLE_TRUE)) { + LOG(HAL, Info) << noMode << "missing AWB lock"; + return false; + } + + /* + * \todo return true here after we satisfy all the requirements: + * https://developer.android.com/reference/android/hardware/camera2/CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_MANUAL_POST_PROCESSING + * Manual tonemap control + * android.tonemap.curve + * android.tonemap.mode + * android.tonemap.maxCurvePoints + * android.tonemap.gamma + * android.tonemap.presetCurve + * Manual white balance control + * android.colorCorrection.transform + * android.colorCorrection.gains + * Manual lens shading map control + * android.shading.mode + * android.statistics.lensShadingMapMode + * android.statistics.lensShadingMap + * android.lens.info.shadingMapSize + * Manual aberration correction control (if aberration correction is supported) + * android.colorCorrection.aberrationMode + * android.colorCorrection.availableAberrationModes + * Auto white balance lock + * android.control.awbLock + */ + return false; +} + +bool CameraCapabilities::validateBurstCaptureCapability() +{ + camera_metadata_ro_entry_t entry; + bool found; + + const char *noMode = "Burst capture capability unavailable: "; + + if (!staticMetadata_->entryContains(ANDROID_CONTROL_AE_LOCK_AVAILABLE, + ANDROID_CONTROL_AE_LOCK_AVAILABLE_TRUE)) { + LOG(HAL, Info) << noMode << "missing AE lock"; + return false; + } + + if (!staticMetadata_->entryContains(ANDROID_CONTROL_AWB_LOCK_AVAILABLE, + ANDROID_CONTROL_AWB_LOCK_AVAILABLE_TRUE)) { + LOG(HAL, Info) << noMode << "missing AWB lock"; + return false; + } + + found = staticMetadata_->getEntry(ANDROID_SYNC_MAX_LATENCY, &entry); + if (!found || *entry.data.i32 < 0 || 4 < *entry.data.i32) { + LOG(HAL, Info) + << noMode << "max sync latency is " + << (found ? 
std::to_string(*entry.data.i32) : "not present"); + return false; + } + + /* + * \todo return true here after we satisfy all the requirements + * https://developer.android.com/reference/android/hardware/camera2/CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_BURST_CAPTURE + */ + return false; +} + +std::set +CameraCapabilities::computeCapabilities() +{ + std::set + capabilities; + + capabilities.insert(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE); + + if (validateManualSensorCapability()) { + capabilities.insert(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR); + /* The requirements for READ_SENSOR_SETTINGS are a subset of MANUAL_SENSOR */ + capabilities.insert(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_READ_SENSOR_SETTINGS); + } + + if (validateManualPostProcessingCapability()) + capabilities.insert(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_POST_PROCESSING); + + if (validateBurstCaptureCapability()) + capabilities.insert(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BURST_CAPTURE); + + if (rawStreamAvailable_) + capabilities.insert(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_RAW); + + return capabilities; +} + +void CameraCapabilities::computeHwLevel( + const std::set &caps) +{ + const char *noFull = "Hardware level FULL unavailable: "; + camera_metadata_ro_entry_t entry; + bool found; + + camera_metadata_enum_android_info_supported_hardware_level + hwLevel = ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_FULL; + + if (!caps.count(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR)) { + LOG(HAL, Info) << noFull << "missing manual sensor"; + hwLevel = ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED; + } + + if (!caps.count(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_POST_PROCESSING)) { + LOG(HAL, Info) << noFull << "missing manual post processing"; + hwLevel = ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED; + } + + if (!caps.count(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BURST_CAPTURE)) { + LOG(HAL, Info) << noFull << "missing burst capture"; + hwLevel = ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED; + } + + found = staticMetadata_->getEntry(ANDROID_SYNC_MAX_LATENCY, &entry); + if (!found || *entry.data.i32 != 0) { + LOG(HAL, Info) << noFull << "missing or invalid max sync latency"; + hwLevel = ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED; + } + + hwLevel_ = hwLevel; +} + +int CameraCapabilities::initialize(std::shared_ptr camera, + int orientation, int facing) +{ + camera_ = camera; + orientation_ = orientation; + facing_ = facing; + rawStreamAvailable_ = false; + maxFrameDuration_ = 0; + + /* Acquire the camera and initialize available stream configurations. 
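
The initialize() sequence below follows libcamera's exclusive-access model: a camera must be acquire()d before configure() may be called, and releasing it afterwards frees it for the real capture session. A minimal sketch of that probe pattern against the public libcamera API (an illustration, not code from this tree; error handling trimmed to essentials):

        #include <cerrno>

        #include <libcamera/libcamera.h>

        using namespace libcamera;

        /* Acquire a camera, apply its default viewfinder configuration, release. */
        int probeCamera(std::shared_ptr<Camera> camera)
        {
                int ret = camera->acquire();
                if (ret)
                        return ret;

                std::unique_ptr<CameraConfiguration> config =
                        camera->generateConfiguration({ StreamRole::Viewfinder });
                if (config)
                        ret = camera->configure(config.get());
                else
                        ret = -EINVAL;

                camera->release();
                return ret;
        }
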
*/ + int ret = camera_->acquire(); + if (ret) { + LOG(HAL, Error) << "Failed to temporarily acquire the camera"; + return ret; + } + + ret = initializeStreamConfigurations(); + if (ret) { + camera_->release(); + return ret; + } + + ret = initializeStaticMetadata(); + camera_->release(); + return ret; +} + +std::vector +CameraCapabilities::initializeYUVResolutions(const PixelFormat &pixelFormat, + const std::vector &resolutions) +{ + std::vector supportedResolutions; + std::unique_ptr cameraConfig = + camera_->generateConfiguration({ StreamRole::Viewfinder }); + if (!cameraConfig) { + LOG(HAL, Error) << "Failed to get supported YUV resolutions"; + return supportedResolutions; + } + + StreamConfiguration &cfg = cameraConfig->at(0); + + for (const Size &res : resolutions) { + cfg.pixelFormat = pixelFormat; + cfg.size = res; + + CameraConfiguration::Status status = cameraConfig->validate(); + if (status != CameraConfiguration::Valid) { + LOG(HAL, Debug) << cfg.toString() << " not supported"; + continue; + } + + LOG(HAL, Debug) << cfg.toString() << " supported"; + + supportedResolutions.push_back(res); + } + + return supportedResolutions; +} + +std::vector +CameraCapabilities::initializeRawResolutions(const PixelFormat &pixelFormat) +{ + std::vector supportedResolutions; + std::unique_ptr cameraConfig = + camera_->generateConfiguration({ StreamRole::Raw }); + if (!cameraConfig) { + LOG(HAL, Error) << "Failed to get supported Raw resolutions"; + return supportedResolutions; + } + + StreamConfiguration &cfg = cameraConfig->at(0); + const StreamFormats &formats = cfg.formats(); + supportedResolutions = formats.sizes(pixelFormat); + + return supportedResolutions; +} + +/* + * Initialize the format conversion map to translate from Android format + * identifier to libcamera pixel formats and fill in the list of supported + * stream configurations to be reported to the Android camera framework through + * the camera static metadata. + */ +int CameraCapabilities::initializeStreamConfigurations() +{ + /* + * Get the maximum output resolutions + * \todo Get this from the camera properties once defined + */ + std::unique_ptr cameraConfig = + camera_->generateConfiguration({ StreamRole::StillCapture }); + if (!cameraConfig) { + LOG(HAL, Error) << "Failed to get maximum resolution"; + return -EINVAL; + } + StreamConfiguration &cfg = cameraConfig->at(0); + + /* + * \todo JPEG - Adjust the maximum available resolution by taking the + * JPEG encoder requirements into account (alignment and aspect ratio). + */ + const Size maxRes = cfg.size; + LOG(HAL, Debug) << "Maximum supported resolution: " << maxRes; + + /* + * Build the list of supported image resolutions. + * + * The resolutions listed in camera3Resolution are supported, up to the + * camera maximum resolution. + * + * Augment the list by adding resolutions calculated from the camera + * maximum one. + */ + std::vector cameraResolutions; + std::copy_if(camera3Resolutions.begin(), camera3Resolutions.end(), + std::back_inserter(cameraResolutions), + [&](const Size &res) { return res < maxRes; }); + + /* + * The Camera3 specification suggests adding 1/2 and 1/4 of the maximum + * resolution. + */ + for (unsigned int divider = 2;; divider <<= 1) { + Size derivedSize{ + maxRes.width / divider, + maxRes.height / divider, + }; + + if (derivedSize.width < 320 || + derivedSize.height < 240) + break; + + cameraResolutions.push_back(derivedSize); + } + cameraResolutions.push_back(maxRes); + + /* Remove duplicated entries from the list of supported resolutions. 
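
To make the list construction above concrete with an assumed 2592x1944 maximum: all six fixed camera3Resolutions are smaller and get copied in, the halving loop then adds 1296x972, 648x486 and 324x243 before 162x121 fails the 320x240 floor, and finally 2592x1944 itself is appended. The sort-and-unique step just below collapses any duplicates this construction produced.
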
*/ + std::sort(cameraResolutions.begin(), cameraResolutions.end()); + auto last = std::unique(cameraResolutions.begin(), cameraResolutions.end()); + cameraResolutions.erase(last, cameraResolutions.end()); + + /* + * Build the list of supported camera formats. + * + * To each Android format a list of compatible libcamera formats is + * associated. The first libcamera format that tests successful is added + * to the format translation map used when configuring the streams. + * It is then tested against the list of supported camera resolutions to + * build the stream configuration map reported through the camera static + * metadata. + */ + Size maxJpegSize; + for (const auto &format : camera3FormatsMap) { + int androidFormat = format.first; + const Camera3Format &camera3Format = format.second; + const std::vector &libcameraFormats = + camera3Format.libcameraFormats; + + LOG(HAL, Debug) << "Trying to map Android format " + << camera3Format.name; + + /* + * JPEG is always supported, either produced directly by the + * camera, or encoded in the HAL. + */ + if (androidFormat == HAL_PIXEL_FORMAT_BLOB) { + formatsMap_[androidFormat] = formats::MJPEG; + LOG(HAL, Debug) << "Mapped Android format " + << camera3Format.name << " to " + << formats::MJPEG + << " (fixed mapping)"; + continue; + } + + /* + * Test the libcamera formats that can produce images + * compatible with the format defined by Android. + */ + PixelFormat mappedFormat; + for (const PixelFormat &pixelFormat : libcameraFormats) { + + LOG(HAL, Debug) << "Testing " << pixelFormat; + + /* + * The stream configuration size can be adjusted, + * not the pixel format. + * + * \todo This could be simplified once all pipeline + * handlers will report the StreamFormats list of + * supported formats. + */ + cfg.pixelFormat = pixelFormat; + + CameraConfiguration::Status status = cameraConfig->validate(); + if (status != CameraConfiguration::Invalid && + cfg.pixelFormat == pixelFormat) { + mappedFormat = pixelFormat; + break; + } + } + + if (!mappedFormat.isValid()) { + /* If the format is not mandatory, skip it. */ + if (!camera3Format.mandatory) + continue; + + LOG(HAL, Error) + << "Failed to map mandatory Android format " + << camera3Format.name << " (" + << utils::hex(androidFormat) << "): aborting"; + return -EINVAL; + } + + /* + * Record the mapping and then proceed to generate the + * stream configurations map, by testing the image resolutions. + */ + formatsMap_[androidFormat] = mappedFormat; + LOG(HAL, Debug) << "Mapped Android format " + << camera3Format.name << " to " + << mappedFormat; + + std::vector resolutions; + const PixelFormatInfo &info = PixelFormatInfo::info(mappedFormat); + switch (info.colourEncoding) { + case PixelFormatInfo::ColourEncodingRAW: + if (info.bitsPerPixel != 16) + continue; + + rawStreamAvailable_ = true; + resolutions = initializeRawResolutions(mappedFormat); + break; + + case PixelFormatInfo::ColourEncodingYUV: + case PixelFormatInfo::ColourEncodingRGB: + /* + * We support enumerating RGB streams here to allow + * mapping IMPLEMENTATION_DEFINED format to RGB. + */ + resolutions = initializeYUVResolutions(mappedFormat, + cameraResolutions); + break; + } + + for (const Size &res : resolutions) { + /* + * Configure the Camera with the collected format and + * resolution to get an updated list of controls. + * + * \todo Avoid the need to configure the camera when + * redesigning the configuration API. 
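
The format negotiation above reduces to one idiom: mutate the candidate configuration, run validate(), and accept the format only if validation neither failed nor adjusted the pixel format away. Distilled into a free function against the public API (an illustration of the test, not a helper that exists in this file):

        #include <libcamera/libcamera.h>

        /* True if the camera accepts this pixel format without adjusting it. */
        static bool acceptsPixelFormat(libcamera::CameraConfiguration *config,
                                       const libcamera::PixelFormat &format)
        {
                libcamera::StreamConfiguration &cfg = config->at(0);
                cfg.pixelFormat = format;

                libcamera::CameraConfiguration::Status status = config->validate();
                return status != libcamera::CameraConfiguration::Invalid &&
                       cfg.pixelFormat == format;
        }

validate() may return Adjusted and still count as success here, as long as the adjustment touched something other than the pixel format; the loop above relies on exactly that to let sizes float while the format stays fixed.
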
+ */ + cfg.size = res; + int ret = camera_->configure(cameraConfig.get()); + if (ret) + return ret; + + const ControlInfoMap &controls = camera_->controls(); + const auto frameDurations = controls.find( + &controls::FrameDurationLimits); + if (frameDurations == controls.end()) { + LOG(HAL, Error) + << "Camera does not report frame durations"; + return -EINVAL; + } + + int64_t minFrameDuration = frameDurations->second.min().get() * 1000; + int64_t maxFrameDuration = frameDurations->second.max().get() * 1000; + + /* + * Cap min frame duration to 30 FPS with 1% tolerance. + * + * 30 frames per second has been validated as the most + * opportune frame rate for quality tuning, and power + * vs performances budget on Intel IPU3-based + * Chromebooks. + * + * \todo This is a platform-specific decision that needs + * to be abstracted and delegated to the configuration + * file. + * + * \todo libcamera only allows to control frame duration + * through the per-request controls::FrameDuration + * control. If we cap the durations here, we should be + * capable of configuring the camera to operate at such + * duration without requiring to have the FrameDuration + * control to be specified for each Request. Defer this + * to the in-development configuration API rework. + */ + int64_t minFrameDurationCap = 1e9 / 30.0; + if (minFrameDuration < minFrameDurationCap) { + float tolerance = + (minFrameDurationCap - minFrameDuration) * 100.0 / minFrameDurationCap; + + /* + * If the tolerance is less than 1%, do not cap + * the frame duration. + */ + if (tolerance > 1.0) + minFrameDuration = minFrameDurationCap; + } + + /* + * Calculate FPS as CTS does and adjust the minimum + * frame duration accordingly: see + * Camera2SurfaceViewTestCase.java:getSuitableFpsRangeForDuration() + */ + minFrameDuration = + 1e9 / static_cast(floor(1e9 / minFrameDuration + 0.05f)); + + streamConfigurations_.push_back({ + res, androidFormat, minFrameDuration, maxFrameDuration, + }); + + /* + * If the format is HAL_PIXEL_FORMAT_YCbCr_420_888 + * from which JPEG is produced, add an entry for + * the JPEG stream. + * + * \todo Wire the JPEG encoder to query the supported + * sizes provided a list of formats it can encode. + * + * \todo Support JPEG streams produced by the camera + * natively. + * + * \todo HAL_PIXEL_FORMAT_BLOB is a 'stalling' format, + * its duration should take into account the time + * required for the YUV to JPEG encoding. For now + * use the same frame durations as collected for + * the YUV/RGB streams. + */ + if (androidFormat == HAL_PIXEL_FORMAT_YCbCr_420_888) { + streamConfigurations_.push_back({ + res, HAL_PIXEL_FORMAT_BLOB, + minFrameDuration, maxFrameDuration, + }); + maxJpegSize = std::max(maxJpegSize, res); + } + + maxFrameDuration_ = std::max(maxFrameDuration_, + maxFrameDuration); + } + + /* + * \todo Calculate the maximum JPEG buffer size by asking the + * encoder giving the maximum frame size required. 
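
Worked numbers for the two frame-duration rules above, using assumed values: the cap is 1e9 / 30 ≈ 33,333,333 ns, so a camera reporting a 30,000,000 ns minimum (33.3 fps) has a tolerance of (33,333,333 - 30,000,000) * 100 / 33,333,333 ≈ 10% > 1% and is capped to 33,333,333 ns. The CTS-style rounding then cleans up near-misses: 33,366,700 ns is 1e9 / 33,366,700 ≈ 29.97 fps, floor(29.97 + 0.05) = 30, and the entry is stored as 1e9 / 30 ns, an exact 30 fps. The fallback right below sizes the worst-case JPEG buffer as width * height * 1.5 bytes, the footprint of a fully uncompressed NV12 frame at the largest BLOB resolution.
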
+ */ + maxJpegBufferSize_ = maxJpegSize.width * maxJpegSize.height * 1.5; + } + + LOG(HAL, Debug) << "Collected stream configuration map: "; + for (const auto &entry : streamConfigurations_) + LOG(HAL, Debug) << "{ " << entry.resolution << " - " + << utils::hex(entry.androidFormat) << " }"; + + return 0; +} + +int CameraCapabilities::initializeStaticMetadata() +{ + staticMetadata_ = std::make_unique(64, 1024); + if (!staticMetadata_->isValid()) { + LOG(HAL, Error) << "Failed to allocate static metadata"; + staticMetadata_.reset(); + return -EINVAL; + } + + /* + * Generate and apply a new configuration for the Viewfinder role to + * collect control limits and properties from a known state. + */ + std::unique_ptr cameraConfig = + camera_->generateConfiguration({ StreamRole::Viewfinder }); + if (!cameraConfig) { + LOG(HAL, Error) << "Failed to generate camera configuration"; + staticMetadata_.reset(); + return -ENODEV; + } + + int ret = camera_->configure(cameraConfig.get()); + if (ret) { + LOG(HAL, Error) << "Failed to initialize the camera state"; + staticMetadata_.reset(); + return ret; + } + + const ControlInfoMap &controlsInfo = camera_->controls(); + const ControlList &properties = camera_->properties(); + + availableCharacteristicsKeys_ = { + ANDROID_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES, + ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES, + ANDROID_CONTROL_AE_AVAILABLE_MODES, + ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES, + ANDROID_CONTROL_AE_COMPENSATION_RANGE, + ANDROID_CONTROL_AE_COMPENSATION_STEP, + ANDROID_CONTROL_AE_LOCK_AVAILABLE, + ANDROID_CONTROL_AF_AVAILABLE_MODES, + ANDROID_CONTROL_AVAILABLE_EFFECTS, + ANDROID_CONTROL_AVAILABLE_MODES, + ANDROID_CONTROL_AVAILABLE_SCENE_MODES, + ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES, + ANDROID_CONTROL_AWB_AVAILABLE_MODES, + ANDROID_CONTROL_AWB_LOCK_AVAILABLE, + ANDROID_CONTROL_MAX_REGIONS, + ANDROID_CONTROL_SCENE_MODE_OVERRIDES, + ANDROID_FLASH_INFO_AVAILABLE, + ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL, + ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES, + ANDROID_JPEG_MAX_SIZE, + ANDROID_LENS_FACING, + ANDROID_LENS_INFO_AVAILABLE_APERTURES, + ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS, + ANDROID_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION, + ANDROID_LENS_INFO_HYPERFOCAL_DISTANCE, + ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE, + ANDROID_NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES, + ANDROID_REQUEST_AVAILABLE_CAPABILITIES, + ANDROID_REQUEST_MAX_NUM_INPUT_STREAMS, + ANDROID_REQUEST_MAX_NUM_OUTPUT_STREAMS, + ANDROID_REQUEST_PARTIAL_RESULT_COUNT, + ANDROID_REQUEST_PIPELINE_MAX_DEPTH, + ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM, + ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS, + ANDROID_SCALER_AVAILABLE_STALL_DURATIONS, + ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS, + ANDROID_SCALER_CROPPING_TYPE, + ANDROID_SENSOR_AVAILABLE_TEST_PATTERN_MODES, + ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE, + ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT, + ANDROID_SENSOR_INFO_EXPOSURE_TIME_RANGE, + ANDROID_SENSOR_INFO_MAX_FRAME_DURATION, + ANDROID_SENSOR_INFO_PHYSICAL_SIZE, + ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE, + ANDROID_SENSOR_INFO_SENSITIVITY_RANGE, + ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE, + ANDROID_SENSOR_ORIENTATION, + ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES, + ANDROID_STATISTICS_INFO_MAX_FACE_COUNT, + ANDROID_SYNC_MAX_LATENCY, + }; + + availableRequestKeys_ = { + ANDROID_COLOR_CORRECTION_ABERRATION_MODE, + ANDROID_CONTROL_AE_ANTIBANDING_MODE, + ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION, + ANDROID_CONTROL_AE_LOCK, + ANDROID_CONTROL_AE_MODE, + 
ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, + ANDROID_CONTROL_AE_TARGET_FPS_RANGE, + ANDROID_CONTROL_AF_MODE, + ANDROID_CONTROL_AF_TRIGGER, + ANDROID_CONTROL_AWB_LOCK, + ANDROID_CONTROL_AWB_MODE, + ANDROID_CONTROL_CAPTURE_INTENT, + ANDROID_CONTROL_EFFECT_MODE, + ANDROID_CONTROL_MODE, + ANDROID_CONTROL_SCENE_MODE, + ANDROID_CONTROL_VIDEO_STABILIZATION_MODE, + ANDROID_FLASH_MODE, + ANDROID_JPEG_ORIENTATION, + ANDROID_JPEG_QUALITY, + ANDROID_JPEG_THUMBNAIL_QUALITY, + ANDROID_JPEG_THUMBNAIL_SIZE, + ANDROID_LENS_APERTURE, + ANDROID_LENS_OPTICAL_STABILIZATION_MODE, + ANDROID_NOISE_REDUCTION_MODE, + ANDROID_SCALER_CROP_REGION, + ANDROID_STATISTICS_FACE_DETECT_MODE + }; + + availableResultKeys_ = { + ANDROID_COLOR_CORRECTION_ABERRATION_MODE, + ANDROID_CONTROL_AE_ANTIBANDING_MODE, + ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION, + ANDROID_CONTROL_AE_LOCK, + ANDROID_CONTROL_AE_MODE, + ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, + ANDROID_CONTROL_AE_STATE, + ANDROID_CONTROL_AE_TARGET_FPS_RANGE, + ANDROID_CONTROL_AF_MODE, + ANDROID_CONTROL_AF_STATE, + ANDROID_CONTROL_AF_TRIGGER, + ANDROID_CONTROL_AWB_LOCK, + ANDROID_CONTROL_AWB_MODE, + ANDROID_CONTROL_AWB_STATE, + ANDROID_CONTROL_CAPTURE_INTENT, + ANDROID_CONTROL_EFFECT_MODE, + ANDROID_CONTROL_MODE, + ANDROID_CONTROL_SCENE_MODE, + ANDROID_CONTROL_VIDEO_STABILIZATION_MODE, + ANDROID_FLASH_MODE, + ANDROID_FLASH_STATE, + ANDROID_JPEG_GPS_COORDINATES, + ANDROID_JPEG_GPS_PROCESSING_METHOD, + ANDROID_JPEG_GPS_TIMESTAMP, + ANDROID_JPEG_ORIENTATION, + ANDROID_JPEG_QUALITY, + ANDROID_JPEG_SIZE, + ANDROID_JPEG_THUMBNAIL_QUALITY, + ANDROID_JPEG_THUMBNAIL_SIZE, + ANDROID_LENS_APERTURE, + ANDROID_LENS_FOCAL_LENGTH, + ANDROID_LENS_OPTICAL_STABILIZATION_MODE, + ANDROID_LENS_STATE, + ANDROID_NOISE_REDUCTION_MODE, + ANDROID_REQUEST_PIPELINE_DEPTH, + ANDROID_SCALER_CROP_REGION, + ANDROID_SENSOR_EXPOSURE_TIME, + ANDROID_SENSOR_FRAME_DURATION, + ANDROID_SENSOR_ROLLING_SHUTTER_SKEW, + ANDROID_SENSOR_TEST_PATTERN_MODE, + ANDROID_SENSOR_TIMESTAMP, + ANDROID_STATISTICS_FACE_DETECT_MODE, + ANDROID_STATISTICS_LENS_SHADING_MAP_MODE, + ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE, + ANDROID_STATISTICS_SCENE_FLICKER, + }; + + /* Color correction static metadata. */ + { + std::vector data; + data.reserve(3); + const auto &infoMap = controlsInfo.find(&controls::draft::ColorCorrectionAberrationMode); + if (infoMap != controlsInfo.end()) { + for (const auto &value : infoMap->second.values()) + data.push_back(value.get()); + } else { + data.push_back(ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF); + } + staticMetadata_->addEntry(ANDROID_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES, + data); + } + + /* Control static metadata. 
*/ + std::vector aeAvailableAntiBandingModes = { + ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF, + ANDROID_CONTROL_AE_ANTIBANDING_MODE_50HZ, + ANDROID_CONTROL_AE_ANTIBANDING_MODE_60HZ, + ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO, + }; + staticMetadata_->addEntry(ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES, + aeAvailableAntiBandingModes); + + std::vector aeAvailableModes = { + ANDROID_CONTROL_AE_MODE_ON, + }; + staticMetadata_->addEntry(ANDROID_CONTROL_AE_AVAILABLE_MODES, + aeAvailableModes); + + std::vector aeCompensationRange = { + 0, 0, + }; + staticMetadata_->addEntry(ANDROID_CONTROL_AE_COMPENSATION_RANGE, + aeCompensationRange); + + const camera_metadata_rational_t aeCompensationStep[] = { + { 0, 1 } + }; + staticMetadata_->addEntry(ANDROID_CONTROL_AE_COMPENSATION_STEP, + aeCompensationStep); + + std::vector availableAfModes = { + ANDROID_CONTROL_AF_MODE_OFF, + }; + staticMetadata_->addEntry(ANDROID_CONTROL_AF_AVAILABLE_MODES, + availableAfModes); + + std::vector availableEffects = { + ANDROID_CONTROL_EFFECT_MODE_OFF, + }; + staticMetadata_->addEntry(ANDROID_CONTROL_AVAILABLE_EFFECTS, + availableEffects); + + std::vector availableSceneModes = { + ANDROID_CONTROL_SCENE_MODE_DISABLED, + }; + staticMetadata_->addEntry(ANDROID_CONTROL_AVAILABLE_SCENE_MODES, + availableSceneModes); + + std::vector availableStabilizationModes = { + ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF, + }; + staticMetadata_->addEntry(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES, + availableStabilizationModes); + + /* + * \todo Inspect the camera capabilities to report the available + * AWB modes. Default to AUTO as CTS tests require it. + */ + std::vector availableAwbModes = { + ANDROID_CONTROL_AWB_MODE_AUTO, + }; + staticMetadata_->addEntry(ANDROID_CONTROL_AWB_AVAILABLE_MODES, + availableAwbModes); + + std::vector availableMaxRegions = { + 0, 0, 0, + }; + staticMetadata_->addEntry(ANDROID_CONTROL_MAX_REGIONS, + availableMaxRegions); + + std::vector sceneModesOverride = { + ANDROID_CONTROL_AE_MODE_ON, + ANDROID_CONTROL_AWB_MODE_AUTO, + ANDROID_CONTROL_AF_MODE_OFF, + }; + staticMetadata_->addEntry(ANDROID_CONTROL_SCENE_MODE_OVERRIDES, + sceneModesOverride); + + uint8_t aeLockAvailable = ANDROID_CONTROL_AE_LOCK_AVAILABLE_FALSE; + staticMetadata_->addEntry(ANDROID_CONTROL_AE_LOCK_AVAILABLE, + aeLockAvailable); + + uint8_t awbLockAvailable = ANDROID_CONTROL_AWB_LOCK_AVAILABLE_FALSE; + staticMetadata_->addEntry(ANDROID_CONTROL_AWB_LOCK_AVAILABLE, + awbLockAvailable); + + char availableControlModes = ANDROID_CONTROL_MODE_AUTO; + staticMetadata_->addEntry(ANDROID_CONTROL_AVAILABLE_MODES, + availableControlModes); + + /* JPEG static metadata. */ + + /* + * Create the list of supported thumbnail sizes by inspecting the + * available JPEG resolutions collected in streamConfigurations_ and + * generate one entry for each aspect ratio. + * + * The JPEG thumbnailer can freely scale, so pick an arbitrary + * (160, 160) size as the bounding rectangle, which is then cropped to + * the different supported aspect ratios. 
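
As a concrete (assumed) illustration of the thumbnail derivation: a 1920x1080 BLOB entry bounded to the 160x160 rectangle yields a 160x90 thumbnail, a 640x480 entry yields 160x120, and together with the mandatory 0x0 sentinel the deduplicated list becomes { 0x0, 160x90, 160x120 }, flattened further below into { 0, 0, 160, 90, 160, 120 } for ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES.
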
+ */ + constexpr Size maxJpegThumbnail(160, 160); + std::vector thumbnailSizes; + thumbnailSizes.push_back({ 0, 0 }); + for (const auto &entry : streamConfigurations_) { + if (entry.androidFormat != HAL_PIXEL_FORMAT_BLOB) + continue; + + Size thumbnailSize = maxJpegThumbnail + .boundedToAspectRatio({ entry.resolution.width, + entry.resolution.height }); + thumbnailSizes.push_back(thumbnailSize); + } + + std::sort(thumbnailSizes.begin(), thumbnailSizes.end()); + auto last = std::unique(thumbnailSizes.begin(), thumbnailSizes.end()); + thumbnailSizes.erase(last, thumbnailSizes.end()); + + /* Transform sizes in to a list of integers that can be consumed. */ + std::vector thumbnailEntries; + thumbnailEntries.reserve(thumbnailSizes.size() * 2); + for (const auto &size : thumbnailSizes) { + thumbnailEntries.push_back(size.width); + thumbnailEntries.push_back(size.height); + } + staticMetadata_->addEntry(ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES, + thumbnailEntries); + + staticMetadata_->addEntry(ANDROID_JPEG_MAX_SIZE, maxJpegBufferSize_); + + /* Sensor static metadata. */ + std::array pixelArraySize; + { + const Size &size = properties.get(properties::PixelArraySize).value_or(Size{}); + pixelArraySize[0] = size.width; + pixelArraySize[1] = size.height; + staticMetadata_->addEntry(ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE, + pixelArraySize); + } + + const auto &cellSize = properties.get(properties::UnitCellSize); + if (cellSize) { + std::array physicalSize{ + cellSize->width * pixelArraySize[0] / 1e6f, + cellSize->height * pixelArraySize[1] / 1e6f + }; + staticMetadata_->addEntry(ANDROID_SENSOR_INFO_PHYSICAL_SIZE, + physicalSize); + } + + { + const Span rects = + properties.get(properties::PixelArrayActiveAreas).value_or(Span{}); + std::vector data{ + static_cast(rects[0].x), + static_cast(rects[0].y), + static_cast(rects[0].width), + static_cast(rects[0].height), + }; + staticMetadata_->addEntry(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE, + data); + } + + int32_t sensitivityRange[] = { + 32, 2400, + }; + staticMetadata_->addEntry(ANDROID_SENSOR_INFO_SENSITIVITY_RANGE, + sensitivityRange); + + /* Report the color filter arrangement if the camera reports it. */ + const auto &filterArr = properties.get(properties::draft::ColorFilterArrangement); + if (filterArr) + staticMetadata_->addEntry(ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT, + *filterArr); + + const auto &exposureInfo = controlsInfo.find(&controls::ExposureTime); + if (exposureInfo != controlsInfo.end()) { + int64_t exposureTimeRange[2] = { + exposureInfo->second.min().get() * 1000LL, + exposureInfo->second.max().get() * 1000LL, + }; + staticMetadata_->addEntry(ANDROID_SENSOR_INFO_EXPOSURE_TIME_RANGE, + exposureTimeRange, 2); + } + + staticMetadata_->addEntry(ANDROID_SENSOR_ORIENTATION, orientation_); + + std::vector testPatternModes = { + ANDROID_SENSOR_TEST_PATTERN_MODE_OFF + }; + const auto &testPatternsInfo = + controlsInfo.find(&controls::draft::TestPatternMode); + if (testPatternsInfo != controlsInfo.end()) { + const auto &values = testPatternsInfo->second.values(); + ASSERT(!values.empty()); + for (const auto &value : values) { + switch (value.get()) { + case controls::draft::TestPatternModeOff: + /* + * ANDROID_SENSOR_TEST_PATTERN_MODE_OFF is + * already in testPatternModes. 
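
One unit subtlety in the sensor block above: libcamera's ExposureTime control is expressed in microseconds while ANDROID_SENSOR_INFO_EXPOSURE_TIME_RANGE wants nanoseconds, hence the * 1000 applied to both bounds. For example (assumed values), a reported range of 100 us to 66,666 us is published as 100,000 ns to 66,666,000 ns.
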
+ */ + break; + + case controls::draft::TestPatternModeSolidColor: + testPatternModes.push_back( + ANDROID_SENSOR_TEST_PATTERN_MODE_SOLID_COLOR); + break; + + case controls::draft::TestPatternModeColorBars: + testPatternModes.push_back( + ANDROID_SENSOR_TEST_PATTERN_MODE_COLOR_BARS); + break; + + case controls::draft::TestPatternModeColorBarsFadeToGray: + testPatternModes.push_back( + ANDROID_SENSOR_TEST_PATTERN_MODE_COLOR_BARS_FADE_TO_GRAY); + break; + + case controls::draft::TestPatternModePn9: + testPatternModes.push_back( + ANDROID_SENSOR_TEST_PATTERN_MODE_PN9); + break; + + case controls::draft::TestPatternModeCustom1: + /* We don't support this yet. */ + break; + + default: + LOG(HAL, Error) << "Unknown test pattern mode: " + << value.get(); + continue; + } + } + } + staticMetadata_->addEntry(ANDROID_SENSOR_AVAILABLE_TEST_PATTERN_MODES, + testPatternModes); + + uint8_t timestampSource = ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE_UNKNOWN; + staticMetadata_->addEntry(ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE, + timestampSource); + + staticMetadata_->addEntry(ANDROID_SENSOR_INFO_MAX_FRAME_DURATION, + maxFrameDuration_); + + /* Statistics static metadata. */ + uint8_t faceDetectMode = ANDROID_STATISTICS_FACE_DETECT_MODE_OFF; + staticMetadata_->addEntry(ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES, + faceDetectMode); + + int32_t maxFaceCount = 0; + staticMetadata_->addEntry(ANDROID_STATISTICS_INFO_MAX_FACE_COUNT, + maxFaceCount); + + { + std::vector data; + data.reserve(2); + const auto &infoMap = controlsInfo.find(&controls::draft::LensShadingMapMode); + if (infoMap != controlsInfo.end()) { + for (const auto &value : infoMap->second.values()) + data.push_back(value.get()); + } else { + data.push_back(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF); + } + staticMetadata_->addEntry(ANDROID_STATISTICS_INFO_AVAILABLE_LENS_SHADING_MAP_MODES, + data); + } + + /* Sync static metadata. */ + setMetadata(staticMetadata_.get(), ANDROID_SYNC_MAX_LATENCY, + controlsInfo, controls::draft::MaxLatency, + ControlRange::Def, + ANDROID_SYNC_MAX_LATENCY_UNKNOWN); + + /* Flash static metadata. */ + char flashAvailable = ANDROID_FLASH_INFO_AVAILABLE_FALSE; + staticMetadata_->addEntry(ANDROID_FLASH_INFO_AVAILABLE, + flashAvailable); + + /* Lens static metadata. */ + std::vector lensApertures = { + 2.53 / 100, + }; + staticMetadata_->addEntry(ANDROID_LENS_INFO_AVAILABLE_APERTURES, + lensApertures); + + uint8_t lensFacing; + switch (facing_) { + default: + case CAMERA_FACING_FRONT: + lensFacing = ANDROID_LENS_FACING_FRONT; + break; + case CAMERA_FACING_BACK: + lensFacing = ANDROID_LENS_FACING_BACK; + break; + case CAMERA_FACING_EXTERNAL: + lensFacing = ANDROID_LENS_FACING_EXTERNAL; + break; + } + staticMetadata_->addEntry(ANDROID_LENS_FACING, lensFacing); + + std::vector lensFocalLengths = { + 1, + }; + staticMetadata_->addEntry(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS, + lensFocalLengths); + + std::vector opticalStabilizations = { + ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF, + }; + staticMetadata_->addEntry(ANDROID_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION, + opticalStabilizations); + + float hypeFocalDistance = 0; + staticMetadata_->addEntry(ANDROID_LENS_INFO_HYPERFOCAL_DISTANCE, + hypeFocalDistance); + + float minFocusDistance = 0; + staticMetadata_->addEntry(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE, + minFocusDistance); + + /* Noise reduction modes. 
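
The sync block above is also where the scalar setMetadata() helper from the top of the file earns its keep: if the camera does not expose controls::draft::MaxLatency, ANDROID_SYNC_MAX_LATENCY falls back to ANDROID_SYNC_MAX_LATENCY_UNKNOWN (-1), which in turn makes validateBurstCaptureCapability() reject BURST_CAPTURE (it requires a value in 0..4) and computeHwLevel() demote the device to LIMITED (FULL requires exactly 0).
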
*/ + { + std::vector data; + data.reserve(5); + const auto &infoMap = controlsInfo.find(&controls::draft::NoiseReductionMode); + if (infoMap != controlsInfo.end()) { + for (const auto &value : infoMap->second.values()) + data.push_back(value.get()); + } else { + data.push_back(ANDROID_NOISE_REDUCTION_MODE_OFF); + } + staticMetadata_->addEntry(ANDROID_NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES, + data); + } + + /* Scaler static metadata. */ + + /* + * \todo The digital zoom factor is a property that depends on the + * desired output configuration and the sensor frame size input to the + * ISP. This information is not available to the Android HAL, not at + * initialization time at least. + * + * As a workaround rely on pipeline handlers initializing the + * ScalerCrop control with the camera default configuration and use the + * maximum and minimum crop rectangles to calculate the digital zoom + * factor. + */ + float maxZoom = 1.0f; + const auto scalerCrop = controlsInfo.find(&controls::ScalerCrop); + if (scalerCrop != controlsInfo.end()) { + Rectangle min = scalerCrop->second.min().get(); + Rectangle max = scalerCrop->second.max().get(); + maxZoom = std::min(1.0f * max.width / min.width, + 1.0f * max.height / min.height); + } + staticMetadata_->addEntry(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM, + maxZoom); + + std::vector availableStreamConfigurations; + std::vector minFrameDurations; + int maxYUVFps = 0; + Size maxYUVSize; + + availableStreamConfigurations.reserve(streamConfigurations_.size() * 4); + minFrameDurations.reserve(streamConfigurations_.size() * 4); + + for (const auto &entry : streamConfigurations_) { + /* + * Filter out YUV streams not capable of running at 30 FPS. + * + * This requirement comes from CTS RecordingTest failures most + * probably related to a requirement of the camcoder video + * recording profile. Inspecting the Intel IPU3 HAL + * implementation confirms this but no reference has been found + * in the metadata documentation. + */ + unsigned int fps = + static_cast(floor(1e9 / entry.minFrameDurationNsec)); + + if (entry.androidFormat != HAL_PIXEL_FORMAT_BLOB && fps < 30) + continue; + + /* + * Collect the FPS of the maximum YUV output size to populate + * AE_AVAILABLE_TARGET_FPS_RANGE + */ + if (entry.androidFormat == HAL_PIXEL_FORMAT_YCbCr_420_888 && + entry.resolution > maxYUVSize) { + maxYUVSize = entry.resolution; + maxYUVFps = fps; + } + + /* Stream configuration map. */ + availableStreamConfigurations.push_back(entry.androidFormat); + availableStreamConfigurations.push_back(entry.resolution.width); + availableStreamConfigurations.push_back(entry.resolution.height); + availableStreamConfigurations.push_back( + ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT); + + /* Per-stream durations. 
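
Two quick illustrations for the scaler block above, with assumed numbers: if ScalerCrop ranges from a 648x486 minimum to a 2592x1944 maximum, maxZoom = min(2592/648, 1944/486) = 4.0; and the 30 fps filter drops any non-BLOB mode whose minimum frame duration exceeds ~33.3 ms, while BLOB (JPEG) entries are kept regardless since still capture is allowed to stall.
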
*/ + minFrameDurations.push_back(entry.androidFormat); + minFrameDurations.push_back(entry.resolution.width); + minFrameDurations.push_back(entry.resolution.height); + minFrameDurations.push_back(entry.minFrameDurationNsec); + + LOG(HAL, Debug) + << "Output Stream: " << utils::hex(entry.androidFormat) + << " (" << entry.resolution << ")[" + << entry.minFrameDurationNsec << "]" + << "@" << fps; + } + staticMetadata_->addEntry(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS, + availableStreamConfigurations); + + staticMetadata_->addEntry(ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS, + minFrameDurations); + + /* + * Register to the camera service {min, max} and {max, max} with + * 'max' being the larger YUV stream maximum frame rate and 'min' being + * the globally minimum frame rate rounded to the next largest integer + * as the camera service expects the camera maximum frame duration to be + * smaller than 10^9 / minFps. + */ + int32_t minFps = std::ceil(1e9 / maxFrameDuration_); + int32_t availableAeFpsTarget[] = { + minFps, maxYUVFps, maxYUVFps, maxYUVFps, + }; + staticMetadata_->addEntry(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES, + availableAeFpsTarget); + + std::vector availableStallDurations; + for (const auto &entry : streamConfigurations_) { + if (entry.androidFormat != HAL_PIXEL_FORMAT_BLOB) + continue; + + availableStallDurations.push_back(entry.androidFormat); + availableStallDurations.push_back(entry.resolution.width); + availableStallDurations.push_back(entry.resolution.height); + availableStallDurations.push_back(entry.minFrameDurationNsec); + } + staticMetadata_->addEntry(ANDROID_SCALER_AVAILABLE_STALL_DURATIONS, + availableStallDurations); + + uint8_t croppingType = ANDROID_SCALER_CROPPING_TYPE_CENTER_ONLY; + staticMetadata_->addEntry(ANDROID_SCALER_CROPPING_TYPE, croppingType); + + /* Request static metadata. */ + int32_t partialResultCount = 1; + staticMetadata_->addEntry(ANDROID_REQUEST_PARTIAL_RESULT_COUNT, + partialResultCount); + + { + /* Default the value to 2 if not reported by the camera. */ + uint8_t maxPipelineDepth = 2; + const auto &infoMap = controlsInfo.find(&controls::draft::PipelineDepth); + if (infoMap != controlsInfo.end()) + maxPipelineDepth = infoMap->second.max().get(); + staticMetadata_->addEntry(ANDROID_REQUEST_PIPELINE_MAX_DEPTH, + maxPipelineDepth); + } + + /* LIMITED does not support reprocessing. */ + uint32_t maxNumInputStreams = 0; + staticMetadata_->addEntry(ANDROID_REQUEST_MAX_NUM_INPUT_STREAMS, + maxNumInputStreams); + + /* Number of { RAW, YUV, JPEG } supported output streams */ + int32_t numOutStreams[] = { rawStreamAvailable_, 2, 1 }; + staticMetadata_->addEntry(ANDROID_REQUEST_MAX_NUM_OUTPUT_STREAMS, + numOutStreams); + + /* Check capabilities */ + capabilities_ = computeCapabilities(); + /* This *must* be uint8_t. 
*/ + std::vector capsVec(capabilities_.begin(), capabilities_.end()); + staticMetadata_->addEntry(ANDROID_REQUEST_AVAILABLE_CAPABILITIES, capsVec); + + computeHwLevel(capabilities_); + staticMetadata_->addEntry(ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL, hwLevel_); + + LOG(HAL, Info) + << "Hardware level: " << hwLevelStrings.find(hwLevel_)->second; + + staticMetadata_->addEntry(ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS, + std::vector(availableCharacteristicsKeys_.begin(), + availableCharacteristicsKeys_.end())); + + staticMetadata_->addEntry(ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS, + std::vector(availableRequestKeys_.begin(), + availableRequestKeys_.end())); + + staticMetadata_->addEntry(ANDROID_REQUEST_AVAILABLE_RESULT_KEYS, + std::vector(availableResultKeys_.begin(), + availableResultKeys_.end())); + + if (!staticMetadata_->isValid()) { + LOG(HAL, Error) << "Failed to construct static metadata"; + staticMetadata_.reset(); + return -EINVAL; + } + + if (staticMetadata_->resized()) { + auto [entryCount, dataCount] = staticMetadata_->usage(); + LOG(HAL, Info) + << "Static metadata resized: " << entryCount + << " entries and " << dataCount << " bytes used"; + } + + return 0; +} + +/* Translate Android format code to libcamera pixel format. */ +PixelFormat CameraCapabilities::toPixelFormat(int format) const +{ + auto it = formatsMap_.find(format); + if (it == formatsMap_.end()) { + LOG(HAL, Error) << "Requested format " << utils::hex(format) + << " not supported"; + return PixelFormat(); + } + + return it->second; +} + +std::unique_ptr CameraCapabilities::requestTemplateManual() const +{ + if (!capabilities_.count(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR)) { + LOG(HAL, Error) << "Manual template not supported"; + return nullptr; + } + + std::unique_ptr manualTemplate = requestTemplatePreview(); + if (!manualTemplate) + return nullptr; + + return manualTemplate; +} + +std::unique_ptr CameraCapabilities::requestTemplatePreview() const +{ + /* + * Give initial hint of entries and number of bytes to be allocated. + * It is deliberate that the hint is slightly larger than required, to + * avoid resizing the container. + * + * CameraMetadata is capable of resizing the container on the fly, if + * adding a new entry will exceed its capacity. + */ + auto requestTemplate = std::make_unique(22, 38); + if (!requestTemplate->isValid()) { + return nullptr; + } + + /* Get the FPS range registered in the static metadata. */ + camera_metadata_ro_entry_t entry; + bool found = staticMetadata_->getEntry(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES, + &entry); + if (!found) { + LOG(HAL, Error) << "Cannot create capture template without FPS range"; + return nullptr; + } + + /* + * Assume the AE_AVAILABLE_TARGET_FPS_RANGE static metadata + * has been assembled as {{min, max} {max, max}}. + */ + requestTemplate->addEntry(ANDROID_CONTROL_AE_TARGET_FPS_RANGE, + entry.data.i32, 2); + + /* + * Get thumbnail sizes from static metadata and add the first non-zero + * size to the template. 
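
Thumbnail sizes are stored as flattened (width, height) pairs with the mandatory 0x0 sentinel first, so the lookup below with entry.data.i32 + 2 and a count of 2 selects the first non-zero pair. With the earlier example list { 0, 0, 160, 90, 160, 120 }, the template gets { 160, 90 }.
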
+ */ + found = staticMetadata_->getEntry(ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES, + &entry); + ASSERT(found && entry.count >= 4); + requestTemplate->addEntry(ANDROID_JPEG_THUMBNAIL_SIZE, + entry.data.i32 + 2, 2); + + uint8_t aeMode = ANDROID_CONTROL_AE_MODE_ON; + requestTemplate->addEntry(ANDROID_CONTROL_AE_MODE, aeMode); + + int32_t aeExposureCompensation = 0; + requestTemplate->addEntry(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION, + aeExposureCompensation); + + uint8_t aePrecaptureTrigger = ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE; + requestTemplate->addEntry(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, + aePrecaptureTrigger); + + uint8_t aeLock = ANDROID_CONTROL_AE_LOCK_OFF; + requestTemplate->addEntry(ANDROID_CONTROL_AE_LOCK, aeLock); + + uint8_t aeAntibandingMode = ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO; + requestTemplate->addEntry(ANDROID_CONTROL_AE_ANTIBANDING_MODE, + aeAntibandingMode); + + uint8_t afMode = ANDROID_CONTROL_AF_MODE_OFF; + requestTemplate->addEntry(ANDROID_CONTROL_AF_MODE, afMode); + + uint8_t afTrigger = ANDROID_CONTROL_AF_TRIGGER_IDLE; + requestTemplate->addEntry(ANDROID_CONTROL_AF_TRIGGER, afTrigger); + + uint8_t awbMode = ANDROID_CONTROL_AWB_MODE_AUTO; + requestTemplate->addEntry(ANDROID_CONTROL_AWB_MODE, awbMode); + + uint8_t awbLock = ANDROID_CONTROL_AWB_LOCK_OFF; + requestTemplate->addEntry(ANDROID_CONTROL_AWB_LOCK, awbLock); + + uint8_t flashMode = ANDROID_FLASH_MODE_OFF; + requestTemplate->addEntry(ANDROID_FLASH_MODE, flashMode); + + uint8_t faceDetectMode = ANDROID_STATISTICS_FACE_DETECT_MODE_OFF; + requestTemplate->addEntry(ANDROID_STATISTICS_FACE_DETECT_MODE, + faceDetectMode); + + uint8_t noiseReduction = ANDROID_NOISE_REDUCTION_MODE_OFF; + requestTemplate->addEntry(ANDROID_NOISE_REDUCTION_MODE, + noiseReduction); + + uint8_t aberrationMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF; + requestTemplate->addEntry(ANDROID_COLOR_CORRECTION_ABERRATION_MODE, + aberrationMode); + + uint8_t controlMode = ANDROID_CONTROL_MODE_AUTO; + requestTemplate->addEntry(ANDROID_CONTROL_MODE, controlMode); + + float lensAperture = 2.53 / 100; + requestTemplate->addEntry(ANDROID_LENS_APERTURE, lensAperture); + + uint8_t opticalStabilization = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF; + requestTemplate->addEntry(ANDROID_LENS_OPTICAL_STABILIZATION_MODE, + opticalStabilization); + + uint8_t captureIntent = ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW; + requestTemplate->addEntry(ANDROID_CONTROL_CAPTURE_INTENT, + captureIntent); + + return requestTemplate; +} + +std::unique_ptr CameraCapabilities::requestTemplateStill() const +{ + std::unique_ptr stillTemplate = requestTemplatePreview(); + if (!stillTemplate) + return nullptr; + + return stillTemplate; +} + +std::unique_ptr CameraCapabilities::requestTemplateVideo() const +{ + std::unique_ptr previewTemplate = requestTemplatePreview(); + if (!previewTemplate) + return nullptr; + + /* + * The video template requires a fixed FPS range. Everything else + * stays the same as the preview template. + */ + camera_metadata_ro_entry_t entry; + staticMetadata_->getEntry(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES, + &entry); + + /* + * Assume the AE_AVAILABLE_TARGET_FPS_RANGE static metadata + * has been assembled as {{min, max} {max, max}}. 
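
So with an advertised ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES of { 15, 30, 30, 30 } (assumed), the preview template keeps the variable { 15, 30 } range from entry.data.i32, while the video template below uses entry.data.i32 + 2 to pin the fixed { 30, 30 } range that camcorder profiles expect.
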
+ */ + previewTemplate->updateEntry(ANDROID_CONTROL_AE_TARGET_FPS_RANGE, + entry.data.i32 + 2, 2); + + return previewTemplate; +} diff --git a/spider-cam/libcamera/src/android/camera_capabilities.h b/spider-cam/libcamera/src/android/camera_capabilities.h new file mode 100644 index 0000000..56ac1ef --- /dev/null +++ b/spider-cam/libcamera/src/android/camera_capabilities.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2021, Google Inc. + * + * Camera static properties manager + */ + +#pragma once + +#include +#include +#include +#include + +#include + +#include +#include +#include + +#include "camera_metadata.h" + +class CameraCapabilities +{ +public: + CameraCapabilities() = default; + + int initialize(std::shared_ptr camera, + int orientation, int facing); + + CameraMetadata *staticMetadata() const { return staticMetadata_.get(); } + libcamera::PixelFormat toPixelFormat(int format) const; + unsigned int maxJpegBufferSize() const { return maxJpegBufferSize_; } + + std::unique_ptr requestTemplateManual() const; + std::unique_ptr requestTemplatePreview() const; + std::unique_ptr requestTemplateStill() const; + std::unique_ptr requestTemplateVideo() const; + +private: + LIBCAMERA_DISABLE_COPY_AND_MOVE(CameraCapabilities) + + struct Camera3StreamConfiguration { + libcamera::Size resolution; + int androidFormat; + int64_t minFrameDurationNsec; + int64_t maxFrameDurationNsec; + }; + + bool validateManualSensorCapability(); + bool validateManualPostProcessingCapability(); + bool validateBurstCaptureCapability(); + + std::set + computeCapabilities(); + + void computeHwLevel( + const std::set &caps); + + std::vector + initializeYUVResolutions(const libcamera::PixelFormat &pixelFormat, + const std::vector &resolutions); + std::vector + initializeRawResolutions(const libcamera::PixelFormat &pixelFormat); + int initializeStreamConfigurations(); + + int initializeStaticMetadata(); + + std::shared_ptr camera_; + + int facing_; + int orientation_; + bool rawStreamAvailable_; + int64_t maxFrameDuration_; + camera_metadata_enum_android_info_supported_hardware_level hwLevel_; + std::set capabilities_; + + std::vector streamConfigurations_; + std::map formatsMap_; + std::unique_ptr staticMetadata_; + unsigned int maxJpegBufferSize_; + + std::set availableCharacteristicsKeys_; + std::set availableRequestKeys_; + std::set availableResultKeys_; +}; diff --git a/spider-cam/libcamera/src/android/camera_device.cpp b/spider-cam/libcamera/src/android/camera_device.cpp new file mode 100644 index 0000000..493f66e --- /dev/null +++ b/spider-cam/libcamera/src/android/camera_device.cpp @@ -0,0 +1,1614 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. 
+ * + * libcamera Android Camera Device + */ + +#include "camera_device.h" + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "system/graphics.h" + +#include "camera_buffer.h" +#include "camera_hal_config.h" +#include "camera_ops.h" +#include "camera_request.h" +#include "hal_framebuffer.h" + +using namespace libcamera; + +LOG_DECLARE_CATEGORY(HAL) + +namespace { + +/* + * \struct Camera3StreamConfig + * \brief Data to store StreamConfiguration associated with camera3_stream(s) + * \var streams List of the pairs of a stream requested by Android HAL client + * and CameraStream::Type associated with the stream + * \var config StreamConfiguration for streams + */ +struct Camera3StreamConfig { + struct Camera3Stream { + camera3_stream_t *stream; + CameraStream::Type type; + }; + + std::vector streams; + StreamConfiguration config; +}; + +/* + * Reorder the configurations so that libcamera::Camera can accept them as much + * as possible. The sort rule is as follows. + * 1.) The configuration for NV12 request whose resolution is the largest. + * 2.) The configuration for JPEG request. + * 3.) Others. Larger resolutions and different formats are put earlier. + */ +void sortCamera3StreamConfigs(std::vector &unsortedConfigs, + const camera3_stream_t *jpegStream) +{ + const Camera3StreamConfig *jpegConfig = nullptr; + + std::map> formatToConfigs; + for (const auto &streamConfig : unsortedConfigs) { + if (jpegStream && !jpegConfig) { + const auto &streams = streamConfig.streams; + if (std::find_if(streams.begin(), streams.end(), + [jpegStream](const auto &stream) { + return stream.stream == jpegStream; + }) != streams.end()) { + jpegConfig = &streamConfig; + continue; + } + } + formatToConfigs[streamConfig.config.pixelFormat].push_back(&streamConfig); + } + + if (jpegStream && !jpegConfig) + LOG(HAL, Fatal) << "No Camera3StreamConfig is found for JPEG"; + + for (auto &fmt : formatToConfigs) { + auto &streamConfigs = fmt.second; + + /* Sorted by resolution. Smaller is put first. */ + std::sort(streamConfigs.begin(), streamConfigs.end(), + [](const auto *streamConfigA, const auto *streamConfigB) { + const Size &sizeA = streamConfigA->config.size; + const Size &sizeB = streamConfigB->config.size; + return sizeA < sizeB; + }); + } + + std::vector sortedConfigs; + sortedConfigs.reserve(unsortedConfigs.size()); + + /* + * NV12 is the most prioritized format. Put the configuration with NV12 + * and the largest resolution first. + */ + const auto nv12It = formatToConfigs.find(formats::NV12); + if (nv12It != formatToConfigs.end()) { + auto &nv12Configs = nv12It->second; + const Camera3StreamConfig *nv12Largest = nv12Configs.back(); + + /* + * If JPEG will be created from NV12 and the size is larger than + * the largest NV12 configurations, then put the NV12 + * configuration for JPEG first. 
+ */ + if (jpegConfig && jpegConfig->config.pixelFormat == formats::NV12) { + const Size &nv12SizeForJpeg = jpegConfig->config.size; + const Size &nv12LargestSize = nv12Largest->config.size; + + if (nv12LargestSize < nv12SizeForJpeg) { + LOG(HAL, Debug) << "Insert " << jpegConfig->config.toString(); + sortedConfigs.push_back(std::move(*jpegConfig)); + jpegConfig = nullptr; + } + } + + LOG(HAL, Debug) << "Insert " << nv12Largest->config.toString(); + sortedConfigs.push_back(*nv12Largest); + nv12Configs.pop_back(); + + if (nv12Configs.empty()) + formatToConfigs.erase(nv12It); + } + + /* If the configuration for JPEG is there, then put it. */ + if (jpegConfig) { + LOG(HAL, Debug) << "Insert " << jpegConfig->config.toString(); + sortedConfigs.push_back(std::move(*jpegConfig)); + jpegConfig = nullptr; + } + + /* + * Put configurations with different formats and larger resolutions + * earlier. + */ + while (!formatToConfigs.empty()) { + for (auto it = formatToConfigs.begin(); it != formatToConfigs.end();) { + auto &configs = it->second; + LOG(HAL, Debug) << "Insert " << configs.back()->config.toString(); + sortedConfigs.push_back(*configs.back()); + configs.pop_back(); + + if (configs.empty()) + it = formatToConfigs.erase(it); + else + it++; + } + } + + ASSERT(sortedConfigs.size() == unsortedConfigs.size()); + + unsortedConfigs = sortedConfigs; +} + +const char *rotationToString(int rotation) +{ + switch (rotation) { + case CAMERA3_STREAM_ROTATION_0: + return "0"; + case CAMERA3_STREAM_ROTATION_90: + return "90"; + case CAMERA3_STREAM_ROTATION_180: + return "180"; + case CAMERA3_STREAM_ROTATION_270: + return "270"; + } + return "INVALID"; +} + +const char *directionToString(int stream_type) +{ + switch (stream_type) { + case CAMERA3_STREAM_OUTPUT: + return "Output"; + case CAMERA3_STREAM_INPUT: + return "Input"; + case CAMERA3_STREAM_BIDIRECTIONAL: + return "Bidirectional"; + default: + LOG(HAL, Warning) << "Unknown stream type: " << stream_type; + return "Unknown"; + } +} + +#if defined(OS_CHROMEOS) +/* + * Check whether the crop_rotate_scale_degrees values for all streams in + * the list are valid according to the Chrome OS camera HAL API. + */ +bool validateCropRotate(const camera3_stream_configuration_t &streamList) +{ + ASSERT(streamList.num_streams > 0); + const int cropRotateScaleDegrees = + streamList.streams[0]->crop_rotate_scale_degrees; + for (unsigned int i = 0; i < streamList.num_streams; ++i) { + const camera3_stream_t &stream = *streamList.streams[i]; + + switch (stream.crop_rotate_scale_degrees) { + case CAMERA3_STREAM_ROTATION_0: + case CAMERA3_STREAM_ROTATION_90: + case CAMERA3_STREAM_ROTATION_270: + break; + + /* 180° rotation is specified by Chrome OS as invalid. */ + case CAMERA3_STREAM_ROTATION_180: + default: + LOG(HAL, Error) << "Invalid crop_rotate_scale_degrees: " + << stream.crop_rotate_scale_degrees; + return false; + } + + if (cropRotateScaleDegrees != stream.crop_rotate_scale_degrees) { + LOG(HAL, Error) << "crop_rotate_scale_degrees in all " + << "streams are not identical"; + return false; + } + } + + return true; +} +#endif + +} /* namespace */ + +/* + * \class CameraDevice + * + * The CameraDevice class wraps a libcamera::Camera instance, and implements + * the camera3_device_t interface, bridging calls received from the Android + * camera service to the CameraDevice. 
+ *
+ * The class translates parameters and operations from the Camera HALv3 API to
+ * the libcamera API to provide static information for a Camera, create request
+ * templates for it, process capture requests and then deliver capture results
+ * back to the framework using the designated callbacks.
+ */
+
+CameraDevice::CameraDevice(unsigned int id, std::shared_ptr<Camera> camera)
+        : id_(id), state_(State::Stopped), camera_(std::move(camera)),
+          facing_(CAMERA_FACING_FRONT), orientation_(0)
+{
+        camera_->requestCompleted.connect(this, &CameraDevice::requestComplete);
+
+        maker_ = "libcamera";
+        model_ = "cameraModel";
+
+        /* \todo Support getting properties on Android */
+        std::ifstream fstream("/var/cache/camera/camera.prop");
+        if (!fstream.is_open())
+                return;
+
+        std::string line;
+        while (std::getline(fstream, line)) {
+                std::string::size_type delimPos = line.find("=");
+                if (delimPos == std::string::npos)
+                        continue;
+                std::string key = line.substr(0, delimPos);
+                std::string val = line.substr(delimPos + 1);
+
+                if (!key.compare("ro.product.model"))
+                        model_ = val;
+                else if (!key.compare("ro.product.manufacturer"))
+                        maker_ = val;
+        }
+}
+
+CameraDevice::~CameraDevice() = default;
+
+std::unique_ptr<CameraDevice> CameraDevice::create(unsigned int id,
+                                                   std::shared_ptr<Camera> cam)
+{
+        return std::unique_ptr<CameraDevice>(
+                new CameraDevice(id, std::move(cam)));
+}
+
+/*
+ * Initialize the camera static information retrieved from the
+ * Camera::properties or from the cameraConfigData.
+ *
+ * cameraConfigData is optional for external camera devices and can be
+ * nullptr.
+ *
+ * This function is called before the camera device is opened.
+ */
+int CameraDevice::initialize(const CameraConfigData *cameraConfigData)
+{
+        /*
+         * Initialize orientation and facing side of the camera.
+         *
+         * If the libcamera::Camera provides this information, as retrieved
+         * from firmware, use it; otherwise fall back to values parsed from
+         * the configuration file. If the configuration file is not available,
+         * the camera is external, so its location and rotation can be safely
+         * defaulted.
+         */
+        const ControlList &properties = camera_->properties();
+
+        const auto &location = properties.get(properties::Location);
+        if (location) {
+                switch (*location) {
+                case properties::CameraLocationFront:
+                        facing_ = CAMERA_FACING_FRONT;
+                        break;
+                case properties::CameraLocationBack:
+                        facing_ = CAMERA_FACING_BACK;
+                        break;
+                case properties::CameraLocationExternal:
+                        /*
+                         * If the camera is reported as external, but the
+                         * CameraHalManager has overridden it, use what is
+                         * reported in the configuration file. This typically
+                         * happens for UVC cameras reported as 'External' by
+                         * libcamera but installed in fixed position on the
+                         * device.
+                         */
+                        if (cameraConfigData && cameraConfigData->facing != -1)
+                                facing_ = cameraConfigData->facing;
+                        else
+                                facing_ = CAMERA_FACING_EXTERNAL;
+                        break;
+                }
+
+                if (cameraConfigData && cameraConfigData->facing != -1 &&
+                    facing_ != cameraConfigData->facing) {
+                        LOG(HAL, Warning)
+                                << "Camera location does not match"
+                                << " configuration file. Using " << facing_;
+                }
+        } else if (cameraConfigData) {
+                if (cameraConfigData->facing == -1) {
+                        LOG(HAL, Error)
+                                << "Camera facing not in configuration file";
+                        return -EINVAL;
+                }
+                facing_ = cameraConfigData->facing;
+        } else {
+                facing_ = CAMERA_FACING_EXTERNAL;
+        }
+
+        /*
+         * The Android orientation metadata specifies its rotation correction
+         * value in clockwise direction whereas libcamera specifies the
+         * rotation property in anticlockwise direction.
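+ * (For example, with the computation used below, a libcamera Rotation
+ * of 270, anticlockwise, becomes an Android orientation of
+ * (360 - 270) % 360 = 90, clockwise.)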
Read the libcamera's + * rotation property (anticlockwise) and compute the corresponding + * value for clockwise direction as required by the Android orientation + * metadata. + */ + const auto &rotation = properties.get(properties::Rotation); + if (rotation) { + orientation_ = (360 - *rotation) % 360; + if (cameraConfigData && cameraConfigData->rotation != -1 && + orientation_ != cameraConfigData->rotation) { + LOG(HAL, Warning) + << "Camera orientation does not match" + << " configuration file. Using " << orientation_; + } + } else if (cameraConfigData) { + if (cameraConfigData->rotation == -1) { + LOG(HAL, Error) + << "Camera rotation not in configuration file"; + return -EINVAL; + } + orientation_ = cameraConfigData->rotation; + } else { + orientation_ = 0; + } + + return capabilities_.initialize(camera_, orientation_, facing_); +} + +/* + * Open a camera device. The static information on the camera shall have been + * initialized with a call to CameraDevice::initialize(). + */ +int CameraDevice::open(const hw_module_t *hardwareModule) +{ + int ret = camera_->acquire(); + if (ret) { + LOG(HAL, Error) << "Failed to acquire the camera"; + return ret; + } + + /* Initialize the hw_device_t in the instance camera3_module_t. */ + camera3Device_.common.tag = HARDWARE_DEVICE_TAG; + camera3Device_.common.version = CAMERA_DEVICE_API_VERSION_3_3; + camera3Device_.common.module = (hw_module_t *)hardwareModule; + camera3Device_.common.close = hal_dev_close; + + /* + * The camera device operations. These actually implement + * the Android Camera HALv3 interface. + */ + camera3Device_.ops = &hal_dev_ops; + camera3Device_.priv = this; + + return 0; +} + +void CameraDevice::close() +{ + stop(); + + camera_->release(); +} + +void CameraDevice::flush() +{ + { + MutexLocker stateLock(stateMutex_); + if (state_ != State::Running) + return; + + state_ = State::Flushing; + } + + camera_->stop(); + + MutexLocker stateLock(stateMutex_); + state_ = State::Stopped; +} + +void CameraDevice::stop() +{ + MutexLocker stateLock(stateMutex_); + + camera_->stop(); + + { + MutexLocker descriptorsLock(descriptorsMutex_); + descriptors_ = {}; + } + + streams_.clear(); + + state_ = State::Stopped; +} + +unsigned int CameraDevice::maxJpegBufferSize() const +{ + return capabilities_.maxJpegBufferSize(); +} + +void CameraDevice::setCallbacks(const camera3_callback_ops_t *callbacks) +{ + callbacks_ = callbacks; +} + +const camera_metadata_t *CameraDevice::getStaticMetadata() +{ + return capabilities_.staticMetadata()->getMetadata(); +} + +/* + * Produce a metadata pack to be used as template for a capture request. + */ +const camera_metadata_t *CameraDevice::constructDefaultRequestSettings(int type) +{ + auto it = requestTemplates_.find(type); + if (it != requestTemplates_.end()) + return it->second->getMetadata(); + + /* Use the capture intent matching the requested template type. */ + std::unique_ptr requestTemplate; + uint8_t captureIntent; + switch (type) { + case CAMERA3_TEMPLATE_PREVIEW: + captureIntent = ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW; + requestTemplate = capabilities_.requestTemplatePreview(); + break; + case CAMERA3_TEMPLATE_STILL_CAPTURE: + /* + * Use the preview template for still capture, they only differ + * for the torch mode we currently do not support. 
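+ * (The capture intent is still set to STILL_CAPTURE below, so the
+ * framework can tell the two use cases apart.)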
+ */ + captureIntent = ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE; + requestTemplate = capabilities_.requestTemplateStill(); + break; + case CAMERA3_TEMPLATE_VIDEO_RECORD: + captureIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_RECORD; + requestTemplate = capabilities_.requestTemplateVideo(); + break; + case CAMERA3_TEMPLATE_VIDEO_SNAPSHOT: + captureIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_SNAPSHOT; + requestTemplate = capabilities_.requestTemplateVideo(); + break; + case CAMERA3_TEMPLATE_MANUAL: + captureIntent = ANDROID_CONTROL_CAPTURE_INTENT_MANUAL; + requestTemplate = capabilities_.requestTemplateManual(); + break; + /* \todo Implement templates generation for the remaining use cases. */ + case CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG: + default: + LOG(HAL, Error) << "Unsupported template request type: " << type; + return nullptr; + } + + if (!requestTemplate || !requestTemplate->isValid()) { + LOG(HAL, Error) << "Failed to construct request template"; + return nullptr; + } + + requestTemplate->updateEntry(ANDROID_CONTROL_CAPTURE_INTENT, + captureIntent); + + requestTemplates_[type] = std::move(requestTemplate); + return requestTemplates_[type]->getMetadata(); +} + +/* + * Inspect the stream_list to produce a list of StreamConfiguration to + * be use to configure the Camera. + */ +int CameraDevice::configureStreams(camera3_stream_configuration_t *stream_list) +{ + /* Before any configuration attempt, stop the camera. */ + stop(); + + if (stream_list->num_streams == 0) { + LOG(HAL, Error) << "No streams in configuration"; + return -EINVAL; + } + +#if defined(OS_CHROMEOS) + if (!validateCropRotate(*stream_list)) + return -EINVAL; +#endif + + /* + * Generate an empty configuration, and construct a StreamConfiguration + * for each camera3_stream to add to it. + */ + std::unique_ptr config = camera_->generateConfiguration(); + if (!config) { + LOG(HAL, Error) << "Failed to generate camera configuration"; + return -EINVAL; + } + + /* + * Clear and remove any existing configuration from previous calls, and + * ensure the required entries are available without further + * reallocation. + */ + streams_.clear(); + streams_.reserve(stream_list->num_streams); + + std::vector streamConfigs; + streamConfigs.reserve(stream_list->num_streams); + + /* First handle all non-MJPEG streams. */ + camera3_stream_t *jpegStream = nullptr; + for (unsigned int i = 0; i < stream_list->num_streams; ++i) { + camera3_stream_t *stream = stream_list->streams[i]; + Size size(stream->width, stream->height); + + PixelFormat format = capabilities_.toPixelFormat(stream->format); + + LOG(HAL, Info) << "Stream #" << i + << ", direction: " << directionToString(stream->stream_type) + << ", width: " << stream->width + << ", height: " << stream->height + << ", format: " << utils::hex(stream->format) + << ", rotation: " << rotationToString(stream->rotation) +#if defined(OS_CHROMEOS) + << ", crop_rotate_scale_degrees: " + << rotationToString(stream->crop_rotate_scale_degrees) +#endif + << " (" << format << ")"; + + if (!format.isValid()) + return -EINVAL; + + /* \todo Support rotation. */ + if (stream->rotation != CAMERA3_STREAM_ROTATION_0) { + LOG(HAL, Error) << "Rotation is not supported"; + return -EINVAL; + } +#if defined(OS_CHROMEOS) + if (stream->crop_rotate_scale_degrees != CAMERA3_STREAM_ROTATION_0) { + LOG(HAL, Error) << "Rotation is not supported"; + return -EINVAL; + } +#endif + + /* Defer handling of MJPEG streams until all others are known. 
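+ * JPEG is encoded in software from one of the other streams, so a
+ * suitable source can only be selected once every non-JPEG
+ * configuration is known; see the mapping loop below.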
*/ + if (stream->format == HAL_PIXEL_FORMAT_BLOB) { + if (jpegStream) { + LOG(HAL, Error) + << "Multiple JPEG streams are not supported"; + return -EINVAL; + } + + jpegStream = stream; + continue; + } + + /* + * While gralloc usage flags are supposed to report usage + * patterns to select a suitable buffer allocation strategy, in + * practice they're also used to make other decisions, such as + * selecting the actual format for the IMPLEMENTATION_DEFINED + * HAL pixel format. To avoid issues, we thus have to set the + * GRALLOC_USAGE_HW_CAMERA_WRITE flag unconditionally, even for + * streams that will be produced in software. + */ + stream->usage |= GRALLOC_USAGE_HW_CAMERA_WRITE; + + /* + * If a CameraStream with the same size and format as the + * current stream has already been requested, associate the two. + */ + auto iter = std::find_if( + streamConfigs.begin(), streamConfigs.end(), + [&size, &format](const Camera3StreamConfig &streamConfig) { + return streamConfig.config.size == size && + streamConfig.config.pixelFormat == format; + }); + if (iter != streamConfigs.end()) { + /* Add usage to copy the buffer in streams[0] to stream. */ + iter->streams[0].stream->usage |= GRALLOC_USAGE_SW_READ_OFTEN; + stream->usage |= GRALLOC_USAGE_SW_WRITE_OFTEN; + iter->streams.push_back({ stream, CameraStream::Type::Mapped }); + continue; + } + + Camera3StreamConfig streamConfig; + streamConfig.streams = { { stream, CameraStream::Type::Direct } }; + streamConfig.config.size = size; + streamConfig.config.pixelFormat = format; + streamConfigs.push_back(std::move(streamConfig)); + } + + /* Now handle the MJPEG streams, adding a new stream if required. */ + if (jpegStream) { + CameraStream::Type type; + int index = -1; + + /* Search for a compatible stream in the non-JPEG ones. */ + for (size_t i = 0; i < streamConfigs.size(); ++i) { + Camera3StreamConfig &streamConfig = streamConfigs[i]; + const auto &cfg = streamConfig.config; + + /* + * \todo The PixelFormat must also be compatible with + * the encoder. + */ + if (cfg.size.width != jpegStream->width || + cfg.size.height != jpegStream->height) + continue; + + LOG(HAL, Info) + << "Android JPEG stream mapped to libcamera stream " << i; + + type = CameraStream::Type::Mapped; + index = i; + + /* + * The source stream will be read by software to + * produce the JPEG stream. + */ + camera3_stream_t *stream = streamConfig.streams[0].stream; + stream->usage |= GRALLOC_USAGE_SW_READ_OFTEN; + break; + } + + /* + * Without a compatible match for JPEG encoding we must + * introduce a new stream to satisfy the request requirements. + */ + if (index < 0) { + /* + * \todo The pixelFormat should be a 'best-fit' choice + * and may require a validation cycle. This is not yet + * handled, and should be considered as part of any + * stream configuration reworks. + */ + Camera3StreamConfig streamConfig; + streamConfig.config.size.width = jpegStream->width; + streamConfig.config.size.height = jpegStream->height; + streamConfig.config.pixelFormat = formats::NV12; + streamConfigs.push_back(std::move(streamConfig)); + + LOG(HAL, Info) << "Adding " << streamConfig.config.toString() + << " for MJPEG support"; + + type = CameraStream::Type::Internal; + index = streamConfigs.size() - 1; + } + + /* The JPEG stream will be produced by software. 
*/ + jpegStream->usage |= GRALLOC_USAGE_SW_WRITE_OFTEN; + + streamConfigs[index].streams.push_back({ jpegStream, type }); + } + + sortCamera3StreamConfigs(streamConfigs, jpegStream); + for (const auto &streamConfig : streamConfigs) { + config->addConfiguration(streamConfig.config); + + CameraStream *sourceStream = nullptr; + for (auto &stream : streamConfig.streams) { + streams_.emplace_back(this, config.get(), stream.type, + stream.stream, sourceStream, + config->size() - 1); + stream.stream->priv = static_cast(&streams_.back()); + + /* + * The streamConfig.streams vector contains as its first + * element a Direct (or Internal) stream, and then an + * optional set of Mapped streams derived from the + * Direct stream. Cache the Direct stream pointer, to + * be used when constructing the subsequent mapped + * streams. + */ + if (stream.type == CameraStream::Type::Direct) + sourceStream = &streams_.back(); + } + } + + switch (config->validate()) { + case CameraConfiguration::Valid: + break; + case CameraConfiguration::Adjusted: + LOG(HAL, Info) << "Camera configuration adjusted"; + + for (const StreamConfiguration &cfg : *config) + LOG(HAL, Info) << " - " << cfg.toString(); + + return -EINVAL; + case CameraConfiguration::Invalid: + LOG(HAL, Info) << "Camera configuration invalid"; + return -EINVAL; + } + + /* + * Once the CameraConfiguration has been adjusted/validated + * it can be applied to the camera. + */ + int ret = camera_->configure(config.get()); + if (ret) { + LOG(HAL, Error) << "Failed to configure camera '" + << camera_->id() << "'"; + return ret; + } + + /* + * Configure the HAL CameraStream instances using the associated + * StreamConfiguration and set the number of required buffers in + * the Android camera3_stream_t. + */ + for (CameraStream &cameraStream : streams_) { + ret = cameraStream.configure(); + if (ret) { + LOG(HAL, Error) << "Failed to configure camera stream"; + return ret; + } + } + + config_ = std::move(config); + return 0; +} + +std::unique_ptr +CameraDevice::createFrameBuffer(const buffer_handle_t camera3buffer, + PixelFormat pixelFormat, const Size &size) +{ + CameraBuffer buf(camera3buffer, pixelFormat, size, PROT_READ); + if (!buf.isValid()) { + LOG(HAL, Fatal) << "Failed to create CameraBuffer"; + return nullptr; + } + + std::vector planes(buf.numPlanes()); + for (size_t i = 0; i < buf.numPlanes(); ++i) { + SharedFD fd{ camera3buffer->data[i] }; + if (!fd.isValid()) { + LOG(HAL, Fatal) << "No valid fd"; + return nullptr; + } + + planes[i].fd = fd; + planes[i].offset = buf.offset(i); + planes[i].length = buf.size(i); + } + + return std::make_unique(planes, camera3buffer); +} + +int CameraDevice::processControls(Camera3RequestDescriptor *descriptor) +{ + const CameraMetadata &settings = descriptor->settings_; + if (!settings.isValid()) + return 0; + + /* Translate the Android request settings to libcamera controls. 
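+ * For example, the ANDROID_SCALER_CROP_REGION entry (x, y, width,
+ * height) is converted below into a libcamera Rectangle for
+ * controls::ScalerCrop.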
*/ + ControlList &controls = descriptor->request_->controls(); + camera_metadata_ro_entry_t entry; + if (settings.getEntry(ANDROID_SCALER_CROP_REGION, &entry)) { + const int32_t *data = entry.data.i32; + Rectangle cropRegion{ data[0], data[1], + static_cast(data[2]), + static_cast(data[3]) }; + controls.set(controls::ScalerCrop, cropRegion); + } + + if (settings.getEntry(ANDROID_SENSOR_TEST_PATTERN_MODE, &entry)) { + const int32_t data = *entry.data.i32; + int32_t testPatternMode = controls::draft::TestPatternModeOff; + switch (data) { + case ANDROID_SENSOR_TEST_PATTERN_MODE_OFF: + testPatternMode = controls::draft::TestPatternModeOff; + break; + + case ANDROID_SENSOR_TEST_PATTERN_MODE_SOLID_COLOR: + testPatternMode = controls::draft::TestPatternModeSolidColor; + break; + + case ANDROID_SENSOR_TEST_PATTERN_MODE_COLOR_BARS: + testPatternMode = controls::draft::TestPatternModeColorBars; + break; + + case ANDROID_SENSOR_TEST_PATTERN_MODE_COLOR_BARS_FADE_TO_GRAY: + testPatternMode = controls::draft::TestPatternModeColorBarsFadeToGray; + break; + + case ANDROID_SENSOR_TEST_PATTERN_MODE_PN9: + testPatternMode = controls::draft::TestPatternModePn9; + break; + + case ANDROID_SENSOR_TEST_PATTERN_MODE_CUSTOM1: + testPatternMode = controls::draft::TestPatternModeCustom1; + break; + + default: + LOG(HAL, Error) + << "Unknown test pattern mode: " << data; + + return -EINVAL; + } + + controls.set(controls::draft::TestPatternMode, testPatternMode); + } + + return 0; +} + +void CameraDevice::abortRequest(Camera3RequestDescriptor *descriptor) const +{ + notifyError(descriptor->frameNumber_, nullptr, CAMERA3_MSG_ERROR_REQUEST); + + for (auto &buffer : descriptor->buffers_) + buffer.status = Camera3RequestDescriptor::Status::Error; + + descriptor->status_ = Camera3RequestDescriptor::Status::Error; +} + +bool CameraDevice::isValidRequest(camera3_capture_request_t *camera3Request) const +{ + if (!camera3Request) { + LOG(HAL, Error) << "No capture request provided"; + return false; + } + + if (!camera3Request->num_output_buffers || + !camera3Request->output_buffers) { + LOG(HAL, Error) << "No output buffers provided"; + return false; + } + + /* configureStreams() has not been called or has failed. 
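+ * (streams_ and config_ are only populated by a successful
+ * configureStreams() call, so an empty state here means the request
+ * cannot be matched to any configured stream.)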
*/ + if (streams_.empty() || !config_) { + LOG(HAL, Error) << "No stream is configured"; + return false; + } + + for (uint32_t i = 0; i < camera3Request->num_output_buffers; i++) { + const camera3_stream_buffer_t &outputBuffer = + camera3Request->output_buffers[i]; + if (!outputBuffer.buffer || !(*outputBuffer.buffer)) { + LOG(HAL, Error) << "Invalid native handle"; + return false; + } + + const native_handle_t *handle = *outputBuffer.buffer; + constexpr int kNativeHandleMaxFds = 1024; + if (handle->numFds < 0 || handle->numFds > kNativeHandleMaxFds) { + LOG(HAL, Error) + << "Invalid number of fds (" << handle->numFds + << ") in buffer " << i; + return false; + } + + constexpr int kNativeHandleMaxInts = 1024; + if (handle->numInts < 0 || handle->numInts > kNativeHandleMaxInts) { + LOG(HAL, Error) + << "Invalid number of ints (" << handle->numInts + << ") in buffer " << i; + return false; + } + + const camera3_stream *camera3Stream = outputBuffer.stream; + if (!camera3Stream) + return false; + + const CameraStream *cameraStream = + static_cast(camera3Stream->priv); + + auto found = std::find_if(streams_.begin(), streams_.end(), + [cameraStream](const CameraStream &stream) { + return &stream == cameraStream; + }); + if (found == streams_.end()) { + LOG(HAL, Error) + << "No corresponding configured stream found"; + return false; + } + } + + return true; +} + +int CameraDevice::processCaptureRequest(camera3_capture_request_t *camera3Request) +{ + if (!isValidRequest(camera3Request)) + return -EINVAL; + + /* + * Save the request descriptors for use at completion time. + * The descriptor and the associated memory reserved here are freed + * at request complete time. + */ + auto descriptor = std::make_unique(camera_.get(), + camera3Request); + + /* + * \todo The Android request model is incremental, settings passed in + * previous requests are to be effective until overridden explicitly in + * a new request. Do we need to cache settings incrementally here, or is + * it handled by the Android camera service ? + */ + if (camera3Request->settings) + lastSettings_ = camera3Request->settings; + + descriptor->settings_ = lastSettings_; + + LOG(HAL, Debug) << "Queueing request " << descriptor->request_->cookie() + << " with " << descriptor->buffers_.size() << " streams"; + + /* + * Process all the Direct and Internal streams first, they map directly + * to a libcamera stream. Streams of type Mapped will be handled later. + * + * Collect the CameraStream associated to each requested capture stream. + * Since requestedStreams is an std:set<>, no duplications can happen. + */ + std::set requestedStreams; + for (const auto &[i, buffer] : utils::enumerate(descriptor->buffers_)) { + CameraStream *cameraStream = buffer.stream; + camera3_stream_t *camera3Stream = cameraStream->camera3Stream(); + + std::stringstream ss; + ss << i << " - (" << camera3Stream->width << "x" + << camera3Stream->height << ")" + << "[" << utils::hex(camera3Stream->format) << "] -> " + << "(" << cameraStream->configuration().size << ")[" + << cameraStream->configuration().pixelFormat << "]"; + + /* + * Inspect the camera stream type, create buffers opportunely + * and add them to the Request if required. + */ + FrameBuffer *frameBuffer = nullptr; + UniqueFD acquireFence; + + MutexLocker lock(descriptor->streamsProcessMutex_); + + switch (cameraStream->type()) { + case CameraStream::Type::Mapped: + /* Mapped streams will be handled in the next loop. 
*/ + continue; + + case CameraStream::Type::Direct: + /* + * Create a libcamera buffer using the dmabuf + * descriptors of the camera3Buffer for each stream and + * associate it with the Camera3RequestDescriptor for + * lifetime management only. + */ + buffer.frameBuffer = + createFrameBuffer(*buffer.camera3Buffer, + cameraStream->configuration().pixelFormat, + cameraStream->configuration().size); + frameBuffer = buffer.frameBuffer.get(); + acquireFence = std::move(buffer.fence); + LOG(HAL, Debug) << ss.str() << " (direct)"; + break; + + case CameraStream::Type::Internal: + /* + * Get the frame buffer from the CameraStream internal + * buffer pool. + * + * The buffer has to be returned to the CameraStream + * once it has been processed. + */ + frameBuffer = cameraStream->getBuffer(); + buffer.internalBuffer = frameBuffer; + LOG(HAL, Debug) << ss.str() << " (internal)"; + + descriptor->pendingStreamsToProcess_.insert( + { cameraStream, &buffer }); + break; + } + + if (!frameBuffer) { + LOG(HAL, Error) << "Failed to create frame buffer"; + return -ENOMEM; + } + + auto fence = std::make_unique(std::move(acquireFence)); + descriptor->request_->addBuffer(cameraStream->stream(), + frameBuffer, std::move(fence)); + + requestedStreams.insert(cameraStream); + } + + /* + * Now handle the Mapped streams. If no buffer has been added for them + * because their corresponding direct source stream is not part of this + * particular request, add one here. + */ + for (const auto &[i, buffer] : utils::enumerate(descriptor->buffers_)) { + CameraStream *cameraStream = buffer.stream; + camera3_stream_t *camera3Stream = cameraStream->camera3Stream(); + + if (cameraStream->type() != CameraStream::Type::Mapped) + continue; + + LOG(HAL, Debug) << i << " - (" << camera3Stream->width << "x" + << camera3Stream->height << ")" + << "[" << utils::hex(camera3Stream->format) << "] -> " + << "(" << cameraStream->configuration().size << ")[" + << cameraStream->configuration().pixelFormat << "]" + << " (mapped)"; + + MutexLocker lock(descriptor->streamsProcessMutex_); + descriptor->pendingStreamsToProcess_.insert({ cameraStream, &buffer }); + + /* + * Make sure the CameraStream this stream is mapped on has been + * added to the request. + */ + CameraStream *sourceStream = cameraStream->sourceStream(); + ASSERT(sourceStream); + if (requestedStreams.find(sourceStream) != requestedStreams.end()) + continue; + + /* + * If that's not the case, we need to add a buffer to the request + * for this stream. + */ + FrameBuffer *frameBuffer = cameraStream->getBuffer(); + buffer.internalBuffer = frameBuffer; + + descriptor->request_->addBuffer(sourceStream->stream(), + frameBuffer, nullptr); + + requestedStreams.insert(sourceStream); + } + + /* + * Translate controls from Android to libcamera and queue the request + * to the camera. + */ + int ret = processControls(descriptor.get()); + if (ret) + return ret; + + /* + * If flush is in progress set the request status to error and place it + * on the queue to be later completed. If the camera has been stopped we + * have to re-start it to be able to process the request. 
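+ *
+ * Summarised (matching the code below):
+ *   Flushing -> abort the request and complete it immediately;
+ *   Stopped  -> start the camera and transition to Running;
+ *   Running  -> queue the request directly.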
+ */ + MutexLocker stateLock(stateMutex_); + + if (state_ == State::Flushing) { + Camera3RequestDescriptor *rawDescriptor = descriptor.get(); + { + MutexLocker descriptorsLock(descriptorsMutex_); + descriptors_.push(std::move(descriptor)); + } + abortRequest(rawDescriptor); + completeDescriptor(rawDescriptor); + + return 0; + } + + if (state_ == State::Stopped) { + lastSettings_ = {}; + + ret = camera_->start(); + if (ret) { + LOG(HAL, Error) << "Failed to start camera"; + return ret; + } + + state_ = State::Running; + } + + Request *request = descriptor->request_.get(); + + { + MutexLocker descriptorsLock(descriptorsMutex_); + descriptors_.push(std::move(descriptor)); + } + + camera_->queueRequest(request); + + return 0; +} + +void CameraDevice::requestComplete(Request *request) +{ + Camera3RequestDescriptor *descriptor = + reinterpret_cast(request->cookie()); + + /* + * Prepare the capture result for the Android camera stack. + * + * The buffer status is set to Success and later changed to Error if + * post-processing/compression fails. + */ + for (auto &buffer : descriptor->buffers_) { + CameraStream *stream = buffer.stream; + + /* + * Streams of type Direct have been queued to the + * libcamera::Camera and their acquire fences have + * already been waited on by the library. + * + * Acquire fences of streams of type Internal and Mapped + * will be handled during post-processing. + */ + if (stream->type() == CameraStream::Type::Direct) { + /* If handling of the fence has failed restore buffer.fence. */ + std::unique_ptr fence = buffer.frameBuffer->releaseFence(); + if (fence) + buffer.fence = fence->release(); + } + buffer.status = Camera3RequestDescriptor::Status::Success; + } + + /* + * If the Request has failed, abort the request by notifying the error + * and complete the request with all buffers in error state. + */ + if (request->status() != Request::RequestComplete) { + LOG(HAL, Error) << "Request " << request->cookie() + << " not successfully completed: " + << request->status(); + + abortRequest(descriptor); + completeDescriptor(descriptor); + + return; + } + + /* + * Notify shutter as soon as we have verified we have a valid request. + * + * \todo The shutter event notification should be sent to the framework + * as soon as possible, earlier than request completion time. + */ + uint64_t sensorTimestamp = static_cast(request->metadata() + .get(controls::SensorTimestamp) + .value_or(0)); + notifyShutter(descriptor->frameNumber_, sensorTimestamp); + + LOG(HAL, Debug) << "Request " << request->cookie() << " completed with " + << descriptor->request_->buffers().size() << " streams"; + + /* + * Generate the metadata associated with the captured buffers. + * + * Notify if the metadata generation has failed, but continue processing + * buffers and return an empty metadata pack. + */ + descriptor->resultMetadata_ = getResultMetadata(*descriptor); + if (!descriptor->resultMetadata_) { + notifyError(descriptor->frameNumber_, nullptr, CAMERA3_MSG_ERROR_RESULT); + + /* + * The camera framework expects an empty metadata pack on error. + * + * \todo Check that the post-processor code handles this situation + * correctly. + */ + descriptor->resultMetadata_ = std::make_unique(0, 0); + } + + /* Handle post-processing. */ + MutexLocker locker(descriptor->streamsProcessMutex_); + + /* + * Queue all the post-processing streams request at once. The completion + * slot streamProcessingComplete() can only execute when we are out + * this critical section. 
This helps to handle synchronous errors here + * itself. + */ + auto iter = descriptor->pendingStreamsToProcess_.begin(); + while (iter != descriptor->pendingStreamsToProcess_.end()) { + CameraStream *stream = iter->first; + Camera3RequestDescriptor::StreamBuffer *buffer = iter->second; + + FrameBuffer *src = request->findBuffer(stream->stream()); + if (!src) { + LOG(HAL, Error) << "Failed to find a source stream buffer"; + setBufferStatus(*buffer, Camera3RequestDescriptor::Status::Error); + iter = descriptor->pendingStreamsToProcess_.erase(iter); + continue; + } + + buffer->srcBuffer = src; + + ++iter; + int ret = stream->process(buffer); + if (ret) { + setBufferStatus(*buffer, Camera3RequestDescriptor::Status::Error); + descriptor->pendingStreamsToProcess_.erase(stream); + + /* + * If the framebuffer is internal to CameraStream return + * it back now that we're done processing it. + */ + if (buffer->internalBuffer) + stream->putBuffer(buffer->internalBuffer); + } + } + + if (descriptor->pendingStreamsToProcess_.empty()) { + locker.unlock(); + completeDescriptor(descriptor); + } +} + +/** + * \brief Complete the Camera3RequestDescriptor + * \param[in] descriptor The Camera3RequestDescriptor that has completed + * + * The function marks the Camera3RequestDescriptor as 'complete'. It shall be + * called when all the streams in the Camera3RequestDescriptor have completed + * capture (or have been generated via post-processing) and the request is ready + * to be sent back to the framework. + * + * \context This function is \threadsafe. + */ +void CameraDevice::completeDescriptor(Camera3RequestDescriptor *descriptor) +{ + MutexLocker lock(descriptorsMutex_); + descriptor->complete_ = true; + + sendCaptureResults(); +} + +/** + * \brief Sequentially send capture results to the framework + * + * Iterate over the descriptors queue to send completed descriptors back to the + * framework, in the same order as they have been queued. For each complete + * descriptor, populate a locally-scoped camera3_capture_result_t from the + * descriptor, send the capture result back by calling the + * process_capture_result() callback, and remove the descriptor from the queue. + * Stop iterating if the descriptor at the front of the queue is not complete. + * + * This function should never be called directly in the codebase. Use + * completeDescriptor() instead. + */ +void CameraDevice::sendCaptureResults() +{ + while (!descriptors_.empty() && !descriptors_.front()->isPending()) { + auto descriptor = std::move(descriptors_.front()); + descriptors_.pop(); + + camera3_capture_result_t captureResult = {}; + + captureResult.frame_number = descriptor->frameNumber_; + + if (descriptor->resultMetadata_) + captureResult.result = + descriptor->resultMetadata_->getMetadata(); + + std::vector resultBuffers; + resultBuffers.reserve(descriptor->buffers_.size()); + + for (auto &buffer : descriptor->buffers_) { + camera3_buffer_status status = CAMERA3_BUFFER_STATUS_ERROR; + + if (buffer.status == Camera3RequestDescriptor::Status::Success) + status = CAMERA3_BUFFER_STATUS_OK; + + /* + * Pass the buffer fence back to the camera framework as + * a release fence. This instructs the framework to wait + * on the acquire fence in case we haven't done so + * ourselves for any reason. 
+ */ + resultBuffers.push_back({ buffer.stream->camera3Stream(), + buffer.camera3Buffer, status, + -1, buffer.fence.release() }); + } + + captureResult.num_output_buffers = resultBuffers.size(); + captureResult.output_buffers = resultBuffers.data(); + + if (descriptor->status_ == Camera3RequestDescriptor::Status::Success) + captureResult.partial_result = 1; + + callbacks_->process_capture_result(callbacks_, &captureResult); + } +} + +void CameraDevice::setBufferStatus(Camera3RequestDescriptor::StreamBuffer &streamBuffer, + Camera3RequestDescriptor::Status status) +{ + streamBuffer.status = status; + if (status != Camera3RequestDescriptor::Status::Success) { + notifyError(streamBuffer.request->frameNumber_, + streamBuffer.stream->camera3Stream(), + CAMERA3_MSG_ERROR_BUFFER); + + /* Also set error status on entire request descriptor. */ + streamBuffer.request->status_ = + Camera3RequestDescriptor::Status::Error; + } +} + +/** + * \brief Handle post-processing completion of a stream in a capture request + * \param[in] streamBuffer The StreamBuffer for which processing is complete + * \param[in] status Stream post-processing status + * + * This function is called from the post-processor's thread whenever a camera + * stream has finished post processing. The corresponding entry is dropped from + * the descriptor's pendingStreamsToProcess_ map. + * + * If the pendingStreamsToProcess_ map is then empty, all streams requiring to + * be generated from post-processing have been completed. Mark the descriptor as + * complete using completeDescriptor() in that case. + */ +void CameraDevice::streamProcessingComplete(Camera3RequestDescriptor::StreamBuffer *streamBuffer, + Camera3RequestDescriptor::Status status) +{ + setBufferStatus(*streamBuffer, status); + + /* + * If the framebuffer is internal to CameraStream return it back now + * that we're done processing it. + */ + if (streamBuffer->internalBuffer) + streamBuffer->stream->putBuffer(streamBuffer->internalBuffer); + + Camera3RequestDescriptor *request = streamBuffer->request; + + { + MutexLocker locker(request->streamsProcessMutex_); + + request->pendingStreamsToProcess_.erase(streamBuffer->stream); + if (!request->pendingStreamsToProcess_.empty()) + return; + } + + completeDescriptor(streamBuffer->request); +} + +std::string CameraDevice::logPrefix() const +{ + return "'" + camera_->id() + "'"; +} + +void CameraDevice::notifyShutter(uint32_t frameNumber, uint64_t timestamp) +{ + camera3_notify_msg_t notify = {}; + + notify.type = CAMERA3_MSG_SHUTTER; + notify.message.shutter.frame_number = frameNumber; + notify.message.shutter.timestamp = timestamp; + + callbacks_->notify(callbacks_, ¬ify); +} + +void CameraDevice::notifyError(uint32_t frameNumber, camera3_stream_t *stream, + camera3_error_msg_code code) const +{ + camera3_notify_msg_t notify = {}; + + notify.type = CAMERA3_MSG_ERROR; + notify.message.error.error_stream = stream; + notify.message.error.frame_number = frameNumber; + notify.message.error.error_code = code; + + callbacks_->notify(callbacks_, ¬ify); +} + +/* + * Produce a set of fixed result metadata. + */ +std::unique_ptr +CameraDevice::getResultMetadata(const Camera3RequestDescriptor &descriptor) const +{ + const ControlList &metadata = descriptor.request_->metadata(); + const CameraMetadata &settings = descriptor.settings_; + camera_metadata_ro_entry_t entry; + bool found; + + /* + * \todo Keep this in sync with the actual number of entries. 
+ * Currently: 40 entries, 156 bytes + * + * Reserve more space for the JPEG metadata set by the post-processor. + * Currently: + * ANDROID_JPEG_GPS_COORDINATES (double x 3) = 24 bytes + * ANDROID_JPEG_GPS_PROCESSING_METHOD (byte x 32) = 32 bytes + * ANDROID_JPEG_GPS_TIMESTAMP (int64) = 8 bytes + * ANDROID_JPEG_SIZE (int32_t) = 4 bytes + * ANDROID_JPEG_QUALITY (byte) = 1 byte + * ANDROID_JPEG_ORIENTATION (int32_t) = 4 bytes + * ANDROID_JPEG_THUMBNAIL_QUALITY (byte) = 1 byte + * ANDROID_JPEG_THUMBNAIL_SIZE (int32 x 2) = 8 bytes + * Total bytes for JPEG metadata: 82 + */ + std::unique_ptr resultMetadata = + std::make_unique(88, 166); + if (!resultMetadata->isValid()) { + LOG(HAL, Error) << "Failed to allocate result metadata"; + return nullptr; + } + + /* + * \todo The value of the results metadata copied from the settings + * will have to be passed to the libcamera::Camera and extracted + * from libcamera::Request::metadata. + */ + + uint8_t value = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF; + resultMetadata->addEntry(ANDROID_COLOR_CORRECTION_ABERRATION_MODE, + value); + + value = ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF; + resultMetadata->addEntry(ANDROID_CONTROL_AE_ANTIBANDING_MODE, value); + + int32_t value32 = 0; + resultMetadata->addEntry(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION, + value32); + + value = ANDROID_CONTROL_AE_LOCK_OFF; + resultMetadata->addEntry(ANDROID_CONTROL_AE_LOCK, value); + + value = ANDROID_CONTROL_AE_MODE_ON; + resultMetadata->addEntry(ANDROID_CONTROL_AE_MODE, value); + + if (settings.getEntry(ANDROID_CONTROL_AE_TARGET_FPS_RANGE, &entry)) + /* + * \todo Retrieve the AE FPS range from the libcamera metadata. + * As libcamera does not support that control, as a temporary + * workaround return what the framework asked. + */ + resultMetadata->addEntry(ANDROID_CONTROL_AE_TARGET_FPS_RANGE, + entry.data.i32, 2); + + found = settings.getEntry(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, &entry); + value = found ? 
*entry.data.u8 : + (uint8_t)ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE; + resultMetadata->addEntry(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, value); + + value = ANDROID_CONTROL_AE_STATE_CONVERGED; + resultMetadata->addEntry(ANDROID_CONTROL_AE_STATE, value); + + value = ANDROID_CONTROL_AF_MODE_OFF; + resultMetadata->addEntry(ANDROID_CONTROL_AF_MODE, value); + + value = ANDROID_CONTROL_AF_STATE_INACTIVE; + resultMetadata->addEntry(ANDROID_CONTROL_AF_STATE, value); + + value = ANDROID_CONTROL_AF_TRIGGER_IDLE; + resultMetadata->addEntry(ANDROID_CONTROL_AF_TRIGGER, value); + + value = ANDROID_CONTROL_AWB_MODE_AUTO; + resultMetadata->addEntry(ANDROID_CONTROL_AWB_MODE, value); + + value = ANDROID_CONTROL_AWB_LOCK_OFF; + resultMetadata->addEntry(ANDROID_CONTROL_AWB_LOCK, value); + + value = ANDROID_CONTROL_AWB_STATE_CONVERGED; + resultMetadata->addEntry(ANDROID_CONTROL_AWB_STATE, value); + + value = ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW; + resultMetadata->addEntry(ANDROID_CONTROL_CAPTURE_INTENT, value); + + value = ANDROID_CONTROL_EFFECT_MODE_OFF; + resultMetadata->addEntry(ANDROID_CONTROL_EFFECT_MODE, value); + + value = ANDROID_CONTROL_MODE_AUTO; + resultMetadata->addEntry(ANDROID_CONTROL_MODE, value); + + value = ANDROID_CONTROL_SCENE_MODE_DISABLED; + resultMetadata->addEntry(ANDROID_CONTROL_SCENE_MODE, value); + + value = ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF; + resultMetadata->addEntry(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE, value); + + value = ANDROID_FLASH_MODE_OFF; + resultMetadata->addEntry(ANDROID_FLASH_MODE, value); + + value = ANDROID_FLASH_STATE_UNAVAILABLE; + resultMetadata->addEntry(ANDROID_FLASH_STATE, value); + + if (settings.getEntry(ANDROID_LENS_APERTURE, &entry)) + resultMetadata->addEntry(ANDROID_LENS_APERTURE, entry.data.f, 1); + + float focal_length = 1.0; + resultMetadata->addEntry(ANDROID_LENS_FOCAL_LENGTH, focal_length); + + value = ANDROID_LENS_STATE_STATIONARY; + resultMetadata->addEntry(ANDROID_LENS_STATE, value); + + value = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF; + resultMetadata->addEntry(ANDROID_LENS_OPTICAL_STABILIZATION_MODE, + value); + + value32 = ANDROID_SENSOR_TEST_PATTERN_MODE_OFF; + resultMetadata->addEntry(ANDROID_SENSOR_TEST_PATTERN_MODE, value32); + + value = ANDROID_STATISTICS_FACE_DETECT_MODE_OFF; + resultMetadata->addEntry(ANDROID_STATISTICS_FACE_DETECT_MODE, value); + + value = ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF; + resultMetadata->addEntry(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE, + value); + + value = ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE_OFF; + resultMetadata->addEntry(ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE, value); + + value = ANDROID_STATISTICS_SCENE_FLICKER_NONE; + resultMetadata->addEntry(ANDROID_STATISTICS_SCENE_FLICKER, value); + + value = ANDROID_NOISE_REDUCTION_MODE_OFF; + resultMetadata->addEntry(ANDROID_NOISE_REDUCTION_MODE, value); + + /* 33.3 msec */ + const int64_t rolling_shutter_skew = 33300000; + resultMetadata->addEntry(ANDROID_SENSOR_ROLLING_SHUTTER_SKEW, + rolling_shutter_skew); + + /* Add metadata tags reported by libcamera. 
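+ * Note the unit conversions below: libcamera reports ExposureTime and
+ * FrameDuration in microseconds, while the corresponding Android tags
+ * expect nanoseconds, hence the multiplications by 1000.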
+ */
+        const int64_t timestamp = metadata.get(controls::SensorTimestamp).value_or(0);
+        resultMetadata->addEntry(ANDROID_SENSOR_TIMESTAMP, timestamp);
+
+        const auto &pipelineDepth = metadata.get(controls::draft::PipelineDepth);
+        if (pipelineDepth)
+                resultMetadata->addEntry(ANDROID_REQUEST_PIPELINE_DEPTH,
+                                         *pipelineDepth);
+
+        const auto &exposureTime = metadata.get(controls::ExposureTime);
+        if (exposureTime)
+                resultMetadata->addEntry(ANDROID_SENSOR_EXPOSURE_TIME,
+                                         *exposureTime * 1000ULL);
+
+        const auto &frameDuration = metadata.get(controls::FrameDuration);
+        if (frameDuration)
+                resultMetadata->addEntry(ANDROID_SENSOR_FRAME_DURATION,
+                                         *frameDuration * 1000);
+
+        const auto &scalerCrop = metadata.get(controls::ScalerCrop);
+        if (scalerCrop) {
+                const Rectangle &crop = *scalerCrop;
+                int32_t cropRect[] = {
+                        crop.x, crop.y, static_cast<int32_t>(crop.width),
+                        static_cast<int32_t>(crop.height),
+                };
+                resultMetadata->addEntry(ANDROID_SCALER_CROP_REGION, cropRect);
+        }
+
+        const auto &testPatternMode = metadata.get(controls::draft::TestPatternMode);
+        if (testPatternMode)
+                resultMetadata->addEntry(ANDROID_SENSOR_TEST_PATTERN_MODE,
+                                         *testPatternMode);
+
+        /*
+         * Return the result metadata pack even if it is not valid: get() will
+         * return nullptr.
+         */
+        if (!resultMetadata->isValid()) {
+                LOG(HAL, Error) << "Failed to construct result metadata";
+        }
+
+        if (resultMetadata->resized()) {
+                auto [entryCount, dataCount] = resultMetadata->usage();
+                LOG(HAL, Info)
+                        << "Result metadata resized: " << entryCount
+                        << " entries and " << dataCount << " bytes used";
+        }
+
+        return resultMetadata;
+}
diff --git a/spider-cam/libcamera/src/android/camera_device.h b/spider-cam/libcamera/src/android/camera_device.h
new file mode 100644
index 0000000..194ca30
--- /dev/null
+++ b/spider-cam/libcamera/src/android/camera_device.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ * + * libcamera Android Camera Device + */ + +#pragma once + +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "camera_capabilities.h" +#include "camera_metadata.h" +#include "camera_stream.h" +#include "hal_framebuffer.h" +#include "jpeg/encoder.h" + +class Camera3RequestDescriptor; +struct CameraConfigData; + +class CameraDevice : protected libcamera::Loggable +{ +public: + static std::unique_ptr create(unsigned int id, + std::shared_ptr cam); + ~CameraDevice(); + + int initialize(const CameraConfigData *cameraConfigData); + + int open(const hw_module_t *hardwareModule); + void close(); + void flush(); + + unsigned int id() const { return id_; } + camera3_device_t *camera3Device() { return &camera3Device_; } + const CameraCapabilities *capabilities() const { return &capabilities_; } + const std::shared_ptr &camera() const { return camera_; } + + const std::string &maker() const { return maker_; } + const std::string &model() const { return model_; } + int facing() const { return facing_; } + int orientation() const { return orientation_; } + unsigned int maxJpegBufferSize() const; + + void setCallbacks(const camera3_callback_ops_t *callbacks); + const camera_metadata_t *getStaticMetadata(); + const camera_metadata_t *constructDefaultRequestSettings(int type); + int configureStreams(camera3_stream_configuration_t *stream_list); + int processCaptureRequest(camera3_capture_request_t *request); + void requestComplete(libcamera::Request *request); + void streamProcessingComplete(Camera3RequestDescriptor::StreamBuffer *bufferStream, + Camera3RequestDescriptor::Status status); + +protected: + std::string logPrefix() const override; + +private: + LIBCAMERA_DISABLE_COPY_AND_MOVE(CameraDevice) + + CameraDevice(unsigned int id, std::shared_ptr camera); + + enum class State { + Stopped, + Flushing, + Running, + }; + + void stop() LIBCAMERA_TSA_EXCLUDES(stateMutex_); + + std::unique_ptr + createFrameBuffer(const buffer_handle_t camera3buffer, + libcamera::PixelFormat pixelFormat, + const libcamera::Size &size); + void abortRequest(Camera3RequestDescriptor *descriptor) const; + bool isValidRequest(camera3_capture_request_t *request) const; + void notifyShutter(uint32_t frameNumber, uint64_t timestamp); + void notifyError(uint32_t frameNumber, camera3_stream_t *stream, + camera3_error_msg_code code) const; + int processControls(Camera3RequestDescriptor *descriptor); + void completeDescriptor(Camera3RequestDescriptor *descriptor) + LIBCAMERA_TSA_EXCLUDES(descriptorsMutex_); + void sendCaptureResults() LIBCAMERA_TSA_REQUIRES(descriptorsMutex_); + void setBufferStatus(Camera3RequestDescriptor::StreamBuffer &buffer, + Camera3RequestDescriptor::Status status); + std::unique_ptr getResultMetadata( + const Camera3RequestDescriptor &descriptor) const; + + unsigned int id_; + camera3_device_t camera3Device_; + + libcamera::Mutex stateMutex_; /* Protects access to the camera state. 
*/ + State state_ LIBCAMERA_TSA_GUARDED_BY(stateMutex_); + + std::shared_ptr camera_; + std::unique_ptr config_; + CameraCapabilities capabilities_; + + std::map> requestTemplates_; + const camera3_callback_ops_t *callbacks_; + + std::vector streams_; + + libcamera::Mutex descriptorsMutex_ LIBCAMERA_TSA_ACQUIRED_AFTER(stateMutex_); + std::queue> descriptors_ + LIBCAMERA_TSA_GUARDED_BY(descriptorsMutex_); + + std::string maker_; + std::string model_; + + int facing_; + int orientation_; + + CameraMetadata lastSettings_; +}; diff --git a/spider-cam/libcamera/src/android/camera_hal_config.cpp b/spider-cam/libcamera/src/android/camera_hal_config.cpp new file mode 100644 index 0000000..7ef451e --- /dev/null +++ b/spider-cam/libcamera/src/android/camera_hal_config.cpp @@ -0,0 +1,208 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2021, Google Inc. + * + * Camera HAL configuration file manager + */ +#include "camera_hal_config.h" + +#include +#include + +#include +#include + +#include "libcamera/internal/yaml_parser.h" + +#include + +using namespace libcamera; + +LOG_DEFINE_CATEGORY(HALConfig) + +class CameraHalConfig::Private : public Extensible::Private +{ + LIBCAMERA_DECLARE_PUBLIC(CameraHalConfig) + +public: + Private(); + + int parseConfigFile(File &file, std::map *cameras); + +private: + int parseCameraConfigData(const std::string &cameraId, const YamlObject &); + int parseLocation(const YamlObject &, CameraConfigData &cameraConfigData); + int parseRotation(const YamlObject &, CameraConfigData &cameraConfigData); + + std::map *cameras_; +}; + +CameraHalConfig::Private::Private() +{ +} + +int CameraHalConfig::Private::parseConfigFile(File &file, + std::map *cameras) +{ + /* + * Parse the HAL properties. + * + * Each camera properties block is a list of properties associated + * with the ID (as assembled by CameraSensor::generateId()) of the + * camera they refer to. + * + * cameras: + * "camera0 id": + * location: value + * rotation: value + * ... + * + * "camera1 id": + * location: value + * rotation: value + * ... 
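+ *
+ * A concrete entry might look like this (the camera ID shown is a
+ * hypothetical example; valid locations are "front" and "back", and
+ * rotation must be in the range [0, 360)):
+ *
+ * cameras:
+ *   "\_SB_.PCI0.I2C2.CAM0":
+ *     location: front
+ *     rotation: 90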
+ */ + + cameras_ = cameras; + + std::unique_ptr root = YamlParser::parse(file); + if (!root) + return -EINVAL; + + if (!root->isDictionary()) + return -EINVAL; + + /* Parse property "cameras" */ + if (!root->contains("cameras")) + return -EINVAL; + + const YamlObject &yamlObjectCameras = (*root)["cameras"]; + + if (!yamlObjectCameras.isDictionary()) + return -EINVAL; + + for (const auto &[cameraId, configData] : yamlObjectCameras.asDict()) { + if (parseCameraConfigData(cameraId, configData)) + return -EINVAL; + } + + return 0; +} + +int CameraHalConfig::Private::parseCameraConfigData(const std::string &cameraId, + const YamlObject &cameraObject) + +{ + if (!cameraObject.isDictionary()) + return -EINVAL; + + CameraConfigData &cameraConfigData = (*cameras_)[cameraId]; + + /* Parse property "location" */ + if (parseLocation(cameraObject, cameraConfigData)) + return -EINVAL; + + /* Parse property "rotation" */ + if (parseRotation(cameraObject, cameraConfigData)) + return -EINVAL; + + return 0; +} + +int CameraHalConfig::Private::parseLocation(const YamlObject &cameraObject, + CameraConfigData &cameraConfigData) +{ + if (!cameraObject.contains("location")) + return -EINVAL; + + std::string location = cameraObject["location"].get(""); + + if (location == "front") + cameraConfigData.facing = CAMERA_FACING_FRONT; + else if (location == "back") + cameraConfigData.facing = CAMERA_FACING_BACK; + else + return -EINVAL; + + return 0; +} + +int CameraHalConfig::Private::parseRotation(const YamlObject &cameraObject, + CameraConfigData &cameraConfigData) +{ + if (!cameraObject.contains("rotation")) + return -EINVAL; + + int32_t rotation = cameraObject["rotation"].get(-1); + + if (rotation < 0 || rotation >= 360) { + LOG(HALConfig, Error) + << "Unknown rotation: " << rotation; + return -EINVAL; + } + + cameraConfigData.rotation = rotation; + return 0; +} + +CameraHalConfig::CameraHalConfig() + : Extensible(std::make_unique()), exists_(false), valid_(false) +{ + parseConfigurationFile(); +} + +/* + * Open the HAL configuration file and validate its content. 
+ * Return 0 on success, a negative error code otherwise + * retval -ENOENT The configuration file is not available + * retval -EINVAL The configuration file is available but not valid + */ +int CameraHalConfig::parseConfigurationFile() +{ + std::string filePath = LIBCAMERA_SYSCONF_DIR "/camera_hal.yaml"; + + File file(filePath); + if (!file.exists()) { + LOG(HALConfig, Debug) + << "Configuration file: \"" << filePath << "\" not found"; + return -ENOENT; + } + + if (!file.open(File::OpenModeFlag::ReadOnly)) { + int ret = file.error(); + LOG(HALConfig, Error) << "Failed to open configuration file " + << filePath << ": " << strerror(-ret); + return ret; + } + + exists_ = true; + + int ret = _d()->parseConfigFile(file, &cameras_); + if (ret) + return -EINVAL; + + valid_ = true; + + for (const auto &c : cameras_) { + const std::string &cameraId = c.first; + const CameraConfigData &camera = c.second; + LOG(HALConfig, Debug) << "'" << cameraId << "' " + << "(" << camera.facing << ")[" + << camera.rotation << "]"; + } + + return 0; +} + +const CameraConfigData *CameraHalConfig::cameraConfigData(const std::string &cameraId) const +{ + const auto &it = cameras_.find(cameraId); + if (it == cameras_.end()) { + LOG(HALConfig, Error) + << "Camera '" << cameraId + << "' not described in the HAL configuration file"; + return nullptr; + } + + return &it->second; +} diff --git a/spider-cam/libcamera/src/android/camera_hal_config.h b/spider-cam/libcamera/src/android/camera_hal_config.h new file mode 100644 index 0000000..a4bedb6 --- /dev/null +++ b/spider-cam/libcamera/src/android/camera_hal_config.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2021, Google Inc. + * + * Camera HAL configuration file manager + */ + +#pragma once + +#include +#include + +#include + +struct CameraConfigData { + int facing = -1; + int rotation = -1; +}; + +class CameraHalConfig final : public libcamera::Extensible +{ + LIBCAMERA_DECLARE_PRIVATE() + +public: + CameraHalConfig(); + + bool exists() const { return exists_; } + bool isValid() const { return valid_; } + + const CameraConfigData *cameraConfigData(const std::string &cameraId) const; + +private: + bool exists_; + bool valid_; + std::map cameras_; + + int parseConfigurationFile(); +}; diff --git a/spider-cam/libcamera/src/android/camera_hal_manager.cpp b/spider-cam/libcamera/src/android/camera_hal_manager.cpp new file mode 100644 index 0000000..7500c74 --- /dev/null +++ b/spider-cam/libcamera/src/android/camera_hal_manager.cpp @@ -0,0 +1,296 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * libcamera Android Camera Manager + */ + +#include "camera_hal_manager.h" + +#include + +#include +#include + +#include "camera_device.h" + +using namespace libcamera; + +LOG_DECLARE_CATEGORY(HAL) + +/* + * \class CameraHalManager + * + * The HAL camera manager is initializated at camera_module_t 'hal_init()' time + * and spawns its own thread where libcamera related events are dispatched to. + * It wraps the libcamera CameraManager operations and provides helpers for the + * camera_module_t operations, to retrieve the number of cameras in the system, + * their static information and to open camera devices. + */ + +CameraHalManager::CameraHalManager() + : cameraManager_(nullptr), callbacks_(nullptr), numInternalCameras_(0), + nextExternalCameraId_(firstExternalCameraId_) +{ +} + +/* CameraManager calls stop() in the destructor. 
*/ +CameraHalManager::~CameraHalManager() = default; + +/* static */ +CameraHalManager *CameraHalManager::instance() +{ + static CameraHalManager *cameraHalManager = new CameraHalManager; + return cameraHalManager; +} + +int CameraHalManager::init() +{ + cameraManager_ = std::make_unique(); + + /* + * If the configuration file is not available the HAL only supports + * external cameras. If it exists but it's not valid then error out. + */ + if (halConfig_.exists() && !halConfig_.isValid()) { + LOG(HAL, Error) << "HAL configuration file is not valid"; + return -EINVAL; + } + + /* Support camera hotplug. */ + cameraManager_->cameraAdded.connect(this, &CameraHalManager::cameraAdded); + cameraManager_->cameraRemoved.connect(this, &CameraHalManager::cameraRemoved); + + int ret = cameraManager_->start(); + if (ret) { + LOG(HAL, Error) << "Failed to start camera manager: " + << strerror(-ret); + cameraManager_.reset(); + return ret; + } + + return 0; +} + +std::tuple +CameraHalManager::open(unsigned int id, const hw_module_t *hardwareModule) +{ + MutexLocker locker(mutex_); + + if (!callbacks_) { + LOG(HAL, Error) << "Can't open camera before callbacks are set"; + return { nullptr, -ENODEV }; + } + + CameraDevice *camera = cameraDeviceFromHalId(id); + if (!camera) { + LOG(HAL, Error) << "Invalid camera id '" << id << "'"; + return { nullptr, -ENODEV }; + } + + int ret = camera->open(hardwareModule); + if (ret) + return { nullptr, ret }; + + LOG(HAL, Info) << "Open camera '" << id << "'"; + + return { camera, 0 }; +} + +void CameraHalManager::cameraAdded(std::shared_ptr cam) +{ + unsigned int id; + bool isCameraExternal = false; + bool isCameraNew = false; + + MutexLocker locker(mutex_); + + /* + * Each camera is assigned a unique integer ID when it is seen for the + * first time. If the camera has been seen before, the previous ID is + * re-used. + * + * IDs starts from '0' for internal cameras and '1000' for external + * cameras. + */ + auto iter = cameraIdsMap_.find(cam->id()); + if (iter != cameraIdsMap_.end()) { + id = iter->second; + if (id >= firstExternalCameraId_) + isCameraExternal = true; + } else { + isCameraNew = true; + + /* + * Now check if this is an external camera and assign + * its id accordingly. + */ + if (cameraLocation(cam.get()) == properties::CameraLocationExternal) { + isCameraExternal = true; + id = nextExternalCameraId_; + } else { + id = numInternalCameras_; + } + } + + /* + * The configuration file must be valid, and contain a corresponding + * entry for internal cameras. External cameras can be initialized + * without configuration file. + */ + if (!isCameraExternal && !halConfig_.exists()) { + LOG(HAL, Error) + << "HAL configuration file is mandatory for internal cameras." + << " Camera " << cam->id() << " failed to load"; + return; + } + + const CameraConfigData *cameraConfigData = halConfig_.cameraConfigData(cam->id()); + + /* + * Some cameras whose location is reported by libcamera as external may + * actually be internal to the device. This is common with UVC cameras + * that are integrated in a laptop. In that case the real location + * should be specified in the configuration file. + * + * If the camera location is external and a configuration entry exists + * for it, override its location. 
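+ * (For instance, a laptop's built-in UVC camera reported as External by
+ * libcamera but listed with "location: front" in the configuration file
+ * is registered as internal and receives an ID below 1000.)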
+ */ + if (isCameraNew && isCameraExternal) { + if (cameraConfigData && cameraConfigData->facing != -1) { + isCameraExternal = false; + id = numInternalCameras_; + } + } + + if (!isCameraExternal && !cameraConfigData) { + LOG(HAL, Error) + << "HAL configuration entry for internal camera " + << cam->id() << " is missing"; + return; + } + + /* Create a CameraDevice instance to wrap the libcamera Camera. */ + std::unique_ptr camera = CameraDevice::create(id, cam); + + int ret = camera->initialize(cameraConfigData); + if (ret) { + LOG(HAL, Error) << "Failed to initialize camera: " << cam->id(); + return; + } + + if (isCameraNew) { + cameraIdsMap_.emplace(cam->id(), id); + + if (isCameraExternal) + nextExternalCameraId_++; + else + numInternalCameras_++; + } + + cameras_.emplace_back(std::move(camera)); + + if (callbacks_) + callbacks_->camera_device_status_change(callbacks_, id, + CAMERA_DEVICE_STATUS_PRESENT); + + LOG(HAL, Debug) << "Camera ID: " << id << " added successfully."; +} + +void CameraHalManager::cameraRemoved(std::shared_ptr cam) +{ + MutexLocker locker(mutex_); + + auto iter = std::find_if(cameras_.begin(), cameras_.end(), + [&cam](const std::unique_ptr &camera) { + return cam == camera->camera(); + }); + if (iter == cameras_.end()) + return; + + /* + * CAMERA_DEVICE_STATUS_NOT_PRESENT should be set for external cameras + * only. + */ + unsigned int id = (*iter)->id(); + if (id >= firstExternalCameraId_) + callbacks_->camera_device_status_change(callbacks_, id, + CAMERA_DEVICE_STATUS_NOT_PRESENT); + + /* + * \todo Check if the camera is already open and running. + * Inform the framework about its absence before deleting its + * reference here. + */ + cameras_.erase(iter); + + LOG(HAL, Debug) << "Camera ID: " << id << " removed successfully."; +} + +int32_t CameraHalManager::cameraLocation(const Camera *cam) +{ + return cam->properties().get(properties::Location).value_or(-1); +} + +CameraDevice *CameraHalManager::cameraDeviceFromHalId(unsigned int id) +{ + auto iter = std::find_if(cameras_.begin(), cameras_.end(), + [id](const std::unique_ptr &camera) { + return camera->id() == id; + }); + if (iter == cameras_.end()) + return nullptr; + + return iter->get(); +} + +unsigned int CameraHalManager::numCameras() const +{ + return numInternalCameras_; +} + +int CameraHalManager::getCameraInfo(unsigned int id, struct camera_info *info) +{ + if (!info) + return -EINVAL; + + MutexLocker locker(mutex_); + + CameraDevice *camera = cameraDeviceFromHalId(id); + if (!camera) { + LOG(HAL, Error) << "Invalid camera id '" << id << "'"; + return -EINVAL; + } + + info->facing = camera->facing(); + info->orientation = camera->orientation(); + info->device_version = CAMERA_DEVICE_API_VERSION_3_3; + info->resource_cost = 0; + info->static_camera_characteristics = camera->getStaticMetadata(); + info->conflicting_devices = nullptr; + info->conflicting_devices_length = 0; + + return 0; +} + +void CameraHalManager::setCallbacks(const camera_module_callbacks_t *callbacks) +{ + callbacks_ = callbacks; + + MutexLocker locker(mutex_); + + /* + * Some external cameras may have been identified before the callbacks_ + * were set. Iterate all existing external cameras and mark them as + * CAMERA_DEVICE_STATUS_PRESENT explicitly. + * + * Internal cameras are already assumed to be present at module load + * time by the Android framework. 
+ */ + for (const std::unique_ptr &camera : cameras_) { + unsigned int id = camera->id(); + if (id >= firstExternalCameraId_) + callbacks_->camera_device_status_change(callbacks_, id, + CAMERA_DEVICE_STATUS_PRESENT); + } +} diff --git a/spider-cam/libcamera/src/android/camera_hal_manager.h b/spider-cam/libcamera/src/android/camera_hal_manager.h new file mode 100644 index 0000000..836a8da --- /dev/null +++ b/spider-cam/libcamera/src/android/camera_hal_manager.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * libcamera Android Camera Manager + */ + +#pragma once + +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include + +#include "camera_hal_config.h" + +class CameraDevice; + +class CameraHalManager +{ +public: + ~CameraHalManager(); + + static CameraHalManager *instance(); + + int init(); + + std::tuple + open(unsigned int id, const hw_module_t *module); + + unsigned int numCameras() const; + int getCameraInfo(unsigned int id, struct camera_info *info); + void setCallbacks(const camera_module_callbacks_t *callbacks); + +private: + LIBCAMERA_DISABLE_COPY_AND_MOVE(CameraHalManager) + + static constexpr unsigned int firstExternalCameraId_ = 1000; + + CameraHalManager(); + + static int32_t cameraLocation(const libcamera::Camera *cam); + + void cameraAdded(std::shared_ptr cam); + void cameraRemoved(std::shared_ptr cam); + + CameraDevice *cameraDeviceFromHalId(unsigned int id) LIBCAMERA_TSA_REQUIRES(mutex_); + + std::unique_ptr cameraManager_; + CameraHalConfig halConfig_; + + const camera_module_callbacks_t *callbacks_; + std::vector> cameras_ LIBCAMERA_TSA_GUARDED_BY(mutex_); + std::map cameraIdsMap_ LIBCAMERA_TSA_GUARDED_BY(mutex_); + libcamera::Mutex mutex_; + + unsigned int numInternalCameras_; + unsigned int nextExternalCameraId_; +}; diff --git a/spider-cam/libcamera/src/android/camera_metadata.cpp b/spider-cam/libcamera/src/android/camera_metadata.cpp new file mode 100644 index 0000000..99f033f --- /dev/null +++ b/spider-cam/libcamera/src/android/camera_metadata.cpp @@ -0,0 +1,231 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. 
+ * + * libcamera Android Camera Metadata Helper + */ + +#include "camera_metadata.h" + +#include + +using namespace libcamera; + +LOG_DEFINE_CATEGORY(CameraMetadata) + +CameraMetadata::CameraMetadata() + : metadata_(nullptr), valid_(false), resized_(false) +{ +} + +CameraMetadata::CameraMetadata(size_t entryCapacity, size_t dataCapacity) + : resized_(false) +{ + metadata_ = allocate_camera_metadata(entryCapacity, dataCapacity); + valid_ = metadata_ != nullptr; +} + +CameraMetadata::CameraMetadata(const camera_metadata_t *metadata) + : resized_(false) +{ + metadata_ = clone_camera_metadata(metadata); + valid_ = metadata_ != nullptr; +} + +CameraMetadata::CameraMetadata(const CameraMetadata &other) + : CameraMetadata(other.getMetadata()) +{ +} + +CameraMetadata::~CameraMetadata() +{ + if (metadata_) + free_camera_metadata(metadata_); +} + +CameraMetadata &CameraMetadata::operator=(const CameraMetadata &other) +{ + if (this == &other) + return *this; + + if (metadata_) + free_camera_metadata(metadata_); + + metadata_ = clone_camera_metadata(other.getMetadata()); + valid_ = metadata_ != nullptr; + + return *this; +} + +std::tuple CameraMetadata::usage() const +{ + size_t currentEntryCount = get_camera_metadata_entry_count(metadata_); + size_t currentDataCount = get_camera_metadata_data_count(metadata_); + + return { currentEntryCount, currentDataCount }; +} + +bool CameraMetadata::getEntry(uint32_t tag, camera_metadata_ro_entry_t *entry) const +{ + if (find_camera_metadata_ro_entry(metadata_, tag, entry)) + return false; + + return true; +} + +/* + * \brief Resize the metadata container, if necessary + * \param[in] count Number of entries to add to the container + * \param[in] size Total size of entries to add, in bytes + * \return True if resize was successful or unnecessary, false otherwise + */ +bool CameraMetadata::resize(size_t count, size_t size) +{ + if (!valid_) + return false; + + if (!count && !size) + return true; + + size_t currentEntryCount = get_camera_metadata_entry_count(metadata_); + size_t currentEntryCapacity = get_camera_metadata_entry_capacity(metadata_); + size_t newEntryCapacity = currentEntryCapacity < currentEntryCount + count ? + currentEntryCapacity * 2 : currentEntryCapacity; + + size_t currentDataCount = get_camera_metadata_data_count(metadata_); + size_t currentDataCapacity = get_camera_metadata_data_capacity(metadata_); + size_t newDataCapacity = currentDataCapacity < currentDataCount + size ? 
+ currentDataCapacity * 2 : currentDataCapacity; + + if (newEntryCapacity > currentEntryCapacity || + newDataCapacity > currentDataCapacity) { + camera_metadata_t *oldMetadata = metadata_; + metadata_ = allocate_camera_metadata(newEntryCapacity, newDataCapacity); + if (!metadata_) { + metadata_ = oldMetadata; + return false; + } + + LOG(CameraMetadata, Info) + << "Resized: old entry capacity " << currentEntryCapacity + << ", old data capacity " << currentDataCapacity + << ", new entry capacity " << newEntryCapacity + << ", new data capacity " << newDataCapacity; + + append_camera_metadata(metadata_, oldMetadata); + free_camera_metadata(oldMetadata); + + resized_ = true; + } + + return true; +} + +template<> bool CameraMetadata::entryContains(uint32_t tag, uint8_t value) const +{ + camera_metadata_ro_entry_t entry; + if (!getEntry(tag, &entry)) + return false; + + for (unsigned int i = 0; i < entry.count; i++) { + if (entry.data.u8[i] == value) + return true; + } + + return false; +} + +bool CameraMetadata::hasEntry(uint32_t tag) const +{ + camera_metadata_ro_entry_t entry; + return getEntry(tag, &entry); +} + +bool CameraMetadata::addEntry(uint32_t tag, const void *data, size_t count, + size_t elementSize) +{ + if (!valid_) + return false; + + if (!resize(1, count * elementSize)) { + LOG(CameraMetadata, Error) << "Failed to resize"; + valid_ = false; + return false; + } + + if (!add_camera_metadata_entry(metadata_, tag, data, count)) + return true; + + const char *name = get_camera_metadata_tag_name(tag); + if (name) + LOG(CameraMetadata, Error) + << "Failed to add tag " << name; + else + LOG(CameraMetadata, Error) + << "Failed to add unknown tag " << tag; + + valid_ = false; + + return false; +} + +bool CameraMetadata::updateEntry(uint32_t tag, const void *data, size_t count, + size_t elementSize) +{ + if (!valid_) + return false; + + camera_metadata_entry_t entry; + int ret = find_camera_metadata_entry(metadata_, tag, &entry); + if (ret) { + const char *name = get_camera_metadata_tag_name(tag); + LOG(CameraMetadata, Error) + << "Failed to update tag " + << (name ? name : "") << ": not present"; + return false; + } + + if (camera_metadata_type_size[entry.type] != elementSize) { + const char *name = get_camera_metadata_tag_name(tag); + LOG(CameraMetadata, Fatal) + << "Invalid element size for tag " + << (name ? name : ""); + return false; + } + + size_t oldSize = + calculate_camera_metadata_entry_data_size(entry.type, + entry.count); + size_t newSize = + calculate_camera_metadata_entry_data_size(entry.type, + count); + size_t sizeIncrement = newSize - oldSize > 0 ? newSize - oldSize : 0; + if (!resize(0, sizeIncrement)) { + LOG(CameraMetadata, Error) << "Failed to resize"; + valid_ = false; + return false; + } + + ret = update_camera_metadata_entry(metadata_, entry.index, data, + count, nullptr); + if (!ret) + return true; + + const char *name = get_camera_metadata_tag_name(tag); + LOG(CameraMetadata, Error) + << "Failed to update tag " << (name ? name : ""); + + valid_ = false; + + return false; +} + +camera_metadata_t *CameraMetadata::getMetadata() +{ + return valid_ ? metadata_ : nullptr; +} + +const camera_metadata_t *CameraMetadata::getMetadata() const +{ + return valid_ ? 
metadata_ : nullptr; +} diff --git a/spider-cam/libcamera/src/android/camera_metadata.h b/spider-cam/libcamera/src/android/camera_metadata.h new file mode 100644 index 0000000..474f280 --- /dev/null +++ b/spider-cam/libcamera/src/android/camera_metadata.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * libcamera Android Camera Metadata Helper + */ + +#pragma once + +#include +#include + +#include + +class CameraMetadata +{ +public: + CameraMetadata(); + CameraMetadata(size_t entryCapacity, size_t dataCapacity); + CameraMetadata(const camera_metadata_t *metadata); + CameraMetadata(const CameraMetadata &other); + ~CameraMetadata(); + + CameraMetadata &operator=(const CameraMetadata &other); + + std::tuple usage() const; + bool resized() const { return resized_; } + + bool isValid() const { return valid_; } + bool getEntry(uint32_t tag, camera_metadata_ro_entry_t *entry) const; + + template bool entryContains(uint32_t tag, T value) const; + + bool hasEntry(uint32_t tag) const; + + template || + std::is_enum_v> * = nullptr> + bool setEntry(uint32_t tag, const T &data) + { + if (hasEntry(tag)) + return updateEntry(tag, &data, 1, sizeof(T)); + else + return addEntry(tag, &data, 1, sizeof(T)); + } + + template || + std::is_enum_v> * = nullptr> + bool addEntry(uint32_t tag, const T &data) + { + return addEntry(tag, &data, 1, sizeof(T)); + } + + template + bool addEntry(uint32_t tag, const T (&data)[size]) + { + return addEntry(tag, data, size, sizeof(T)); + } + + template + bool addEntry(uint32_t tag, const S &data) + { + return addEntry(tag, data.data(), data.size(), sizeof(T)); + } + + template + bool addEntry(uint32_t tag, const T *data, size_t count) + { + return addEntry(tag, data, count, sizeof(T)); + } + + template + bool updateEntry(uint32_t tag, const T &data) + { + return updateEntry(tag, &data, 1, sizeof(T)); + } + + template + bool updateEntry(uint32_t tag, const T (&data)[size]) + { + return updateEntry(tag, data, size, sizeof(T)); + } + + template + bool updateEntry(uint32_t tag, const S &data) + { + return updateEntry(tag, data.data(), data.size(), sizeof(T)); + } + + template + bool updateEntry(uint32_t tag, const T *data, size_t count) + { + return updateEntry(tag, data, count, sizeof(T)); + } + + camera_metadata_t *getMetadata(); + const camera_metadata_t *getMetadata() const; + +private: + bool resize(size_t count, size_t size); + bool addEntry(uint32_t tag, const void *data, size_t count, + size_t elementSize); + bool updateEntry(uint32_t tag, const void *data, size_t count, + size_t elementSize); + + camera_metadata_t *metadata_; + bool valid_; + bool resized_; +}; diff --git a/spider-cam/libcamera/src/android/camera_ops.cpp b/spider-cam/libcamera/src/android/camera_ops.cpp new file mode 100644 index 0000000..ecaac5a --- /dev/null +++ b/spider-cam/libcamera/src/android/camera_ops.cpp @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * Android Camera HAL Operations + */ + +#include "camera_ops.h" + +#include + +#include "camera_device.h" + +using namespace libcamera; + +/* + * Translation layer between the Android Camera HAL device operations and the + * CameraDevice. 
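+ *
+ * Every entry point follows the same shape: the camera3_device_t carries a
+ * back pointer to its CameraDevice in priv, so each operation is roughly
+ *
+ *   CameraDevice *camera = reinterpret_cast<CameraDevice *>(dev->priv);
+ *   return camera->correspondingOperation(...);
+ *
+ * preceded by a null check on dev.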
+ */ + +static int hal_dev_initialize(const struct camera3_device *dev, + const camera3_callback_ops_t *callback_ops) +{ + if (!dev) + return -EINVAL; + + CameraDevice *camera = reinterpret_cast(dev->priv); + camera->setCallbacks(callback_ops); + + return 0; +} + +static int hal_dev_configure_streams(const struct camera3_device *dev, + camera3_stream_configuration_t *stream_list) +{ + if (!dev) + return -EINVAL; + + CameraDevice *camera = reinterpret_cast(dev->priv); + return camera->configureStreams(stream_list); +} + +static const camera_metadata_t * +hal_dev_construct_default_request_settings(const struct camera3_device *dev, + int type) +{ + if (!dev) + return nullptr; + + CameraDevice *camera = reinterpret_cast(dev->priv); + return camera->constructDefaultRequestSettings(type); +} + +static int hal_dev_process_capture_request(const struct camera3_device *dev, + camera3_capture_request_t *request) +{ + if (!dev) + return -EINVAL; + + CameraDevice *camera = reinterpret_cast(dev->priv); + return camera->processCaptureRequest(request); +} + +static void hal_dev_dump([[maybe_unused]] const struct camera3_device *dev, + [[maybe_unused]] int fd) +{ +} + +static int hal_dev_flush(const struct camera3_device *dev) +{ + if (!dev) + return -EINVAL; + + CameraDevice *camera = reinterpret_cast(dev->priv); + camera->flush(); + + return 0; +} + +int hal_dev_close(hw_device_t *hw_device) +{ + if (!hw_device) + return -EINVAL; + + camera3_device_t *dev = reinterpret_cast(hw_device); + CameraDevice *camera = reinterpret_cast(dev->priv); + + camera->close(); + + return 0; +} + +camera3_device_ops hal_dev_ops = { + .initialize = hal_dev_initialize, + .configure_streams = hal_dev_configure_streams, + .register_stream_buffers = nullptr, + .construct_default_request_settings = hal_dev_construct_default_request_settings, + .process_capture_request = hal_dev_process_capture_request, + .get_metadata_vendor_tag_ops = nullptr, + .dump = hal_dev_dump, + .flush = hal_dev_flush, + .reserved = { nullptr }, +}; diff --git a/spider-cam/libcamera/src/android/camera_ops.h b/spider-cam/libcamera/src/android/camera_ops.h new file mode 100644 index 0000000..750dc94 --- /dev/null +++ b/spider-cam/libcamera/src/android/camera_ops.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * Android Camera HAL Operations + */ + +#pragma once + +#include + +int hal_dev_close(hw_device_t *hw_device); +extern camera3_device_ops hal_dev_ops; diff --git a/spider-cam/libcamera/src/android/camera_request.cpp b/spider-cam/libcamera/src/android/camera_request.cpp new file mode 100644 index 0000000..0d45960 --- /dev/null +++ b/spider-cam/libcamera/src/android/camera_request.cpp @@ -0,0 +1,194 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019-2021, Google Inc. + * + * libcamera Android Camera Request Descriptor + */ + +#include "camera_request.h" + +#include + +#include "camera_buffer.h" + +using namespace libcamera; + +/* + * \class Camera3RequestDescriptor + * + * A utility class that groups information about a capture request to be later + * reused at request complete time to notify the framework. 
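+ *
+ * In short, and as the diagram below details: one descriptor is created per
+ * camera3_capture_request_t, and it is completed either directly from
+ * requestCompleted() or, when post-processing is still pending, from the
+ * last streamProcessingComplete() call.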
+ * + ******************************************************************************* + * Lifetime of a Camera3RequestDescriptor tracking a capture request placed by + * Android Framework + ******************************************************************************* + * + * + * Android Framework + * │ + * │ ┌──────────────────────────────────┠+ * │ │camera3_capture_request_t │ + * │ │ │ + * │ │Requested output streams │ + * │ │ stream1 stream2 stream3 ... │ + * │ └──────────────────────────────────┘ + * â–¼ + * ┌─────────────────────────────────────────────────────────────┠+ * │ libcamera HAL │ + * ├─────────────────────────────────────────────────────────────┤ + * │ CameraDevice │ + * │ │ + * │ processCaptureRequest(camera3_capture_request_t request) │ + * │ │ + * │ - Create Camera3RequestDescriptor tracking this request │ + * │ - Streams requiring post-processing are stored in the │ + * │ pendingStreamsToProcess map │ + * │ - Add this Camera3RequestDescriptor to descriptors' queue │ + * │ CameraDevice::descriptors_ │ + * │ │ ┌─────────────────────────┠+ * │ - Queue the capture request to libcamera core ────────────┤►│libcamera core │ + * │ │ ├─────────────────────────┤ + * │ │ │- Capture from Camera │ + * │ │ │ │ + * │ │ │- Emit │ + * │ │ │ Camera::requestComplete│ + * │ requestCompleted(Request *request) ◄───────────────────────┼─┼──── │ + * │ │ │ │ + * │ - Check request completion status │ └─────────────────────────┘ + * │ │ + * │ - if (pendingStreamsToProcess > 0) │ + * │ Queue all entries from pendingStreamsToProcess │ + * │ else │ │ + * │ completeDescriptor() │ └──────────────────────┠+ * │ │ │ + * │ ┌──────────────────────────┴───┬──────────────────┠│ + * │ │ │ │ │ + * │ ┌──────────▼────────────┠┌───────────▼─────────┠▼ │ + * │ │CameraStream1 │ │CameraStream2 │ .... │ + * │ ├┬───┬───┬──────────────┤ ├┬───┬───┬────────────┤ │ + * │ ││ │ │ │ ││ │ │ │ │ + * │ │▼───▼───▼──────────────┤ │▼───▼───▼────────────┤ │ + * │ │PostProcessorWorker │ │PostProcessorWorker │ │ + * │ │ │ │ │ │ + * │ │ +------------------+ │ │ +------------------+│ │ + * │ │ | PostProcessor | │ │ | PostProcessor |│ │ + * │ │ | process() | │ │ | process() |│ │ + * │ │ | | │ │ | |│ │ + * │ │ | Emit | │ │ | Emit |│ │ + * │ │ | processComplete | │ │ | processComplete |│ │ + * │ │ | | │ │ | |│ │ + * │ │ +--------------│---+ │ │ +--------------│---+│ │ + * │ │ │ │ │ │ │ │ + * │ │ │ │ │ │ │ │ + * │ └────────────────┼──────┘ └────────────────┼────┘ │ + * │ │ │ │ + * │ │ │ │ + * │ │ │ │ + * │ â–¼ â–¼ │ + * │ +---------------------------------------+ +--------------+ │ + * │ | CameraDevice | | | │ + * │ | | | | │ + * │ | streamProcessingComplete() | | | │ + * │ | | | | │ + * │ | - Check and set buffer status | | .... | │ + * │ | - Remove post+processing entry | | | │ + * │ | from pendingStreamsToProcess | | | │ + * │ | | | | │ + * │ | - if (pendingStreamsToProcess.empty())| | | │ + * │ | completeDescriptor | | | │ + * │ | | | | │ + * │ +---------------------------------------+ +--------------+ │ + * │ │ + * └────────────────────────────────────────────────────────────────────────────────────┘ + * + * +-------------+ + * | | - PostProcessorWorker's thread + * | | + * +-------------+ + */ + +Camera3RequestDescriptor::Camera3RequestDescriptor( + Camera *camera, const camera3_capture_request_t *camera3Request) +{ + frameNumber_ = camera3Request->frame_number; + + /* Copy the camera3 request stream information for later access. 
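+ * (buffers is a non-owning Span view over the request's output_buffers
+ * array; each entry is then copied into the owning buffers_ vector as a
+ * StreamBuffer.)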
*/ + const Span buffers{ + camera3Request->output_buffers, + camera3Request->num_output_buffers + }; + + buffers_.reserve(buffers.size()); + + for (const camera3_stream_buffer_t &buffer : buffers) { + CameraStream *stream = + static_cast(buffer.stream->priv); + + buffers_.emplace_back(stream, buffer, this); + } + + /* Clone the controls associated with the camera3 request. */ + settings_ = CameraMetadata(camera3Request->settings); + + /* + * Create the CaptureRequest, stored as a unique_ptr<> to tie its + * lifetime to the descriptor. + */ + request_ = camera->createRequest(reinterpret_cast(this)); +} + +Camera3RequestDescriptor::~Camera3RequestDescriptor() = default; + +/** + * \struct Camera3RequestDescriptor::StreamBuffer + * \brief Group information for per-stream buffer of Camera3RequestDescriptor + * + * A capture request placed to the libcamera HAL can contain multiple streams. + * Each stream will have an associated buffer to be filled. StreamBuffer + * tracks this buffer with contextual information which aids in the stream's + * generation. The generation of the stream will depend on its type (refer to + * the CameraStream::Type documentation). + * + * \var Camera3RequestDescriptor::StreamBuffer::stream + * \brief Pointer to the corresponding CameraStream + * + * \var Camera3RequestDescriptor::StreamBuffer::camera3Buffer + * \brief Native handle to the buffer + * + * \var Camera3RequestDescriptor::StreamBuffer::frameBuffer + * \brief Encapsulate the dmabuf handle inside a libcamera::FrameBuffer for + * direct streams + * + * \var Camera3RequestDescriptor::StreamBuffer::fence + * \brief Acquire fence of the buffer + * + * \var Camera3RequestDescriptor::StreamBuffer::status + * \brief Track the status of the buffer + * + * \var Camera3RequestDescriptor::StreamBuffer::internalBuffer + * \brief Pointer to a buffer internally handled by CameraStream (if any) + * + * \var Camera3RequestDescriptor::StreamBuffer::srcBuffer + * \brief Pointer to the source frame buffer used for post-processing + * + * \var Camera3RequestDescriptor::StreamBuffer::dstBuffer + * \brief Pointer to the destination frame buffer used for post-processing + * + * \var Camera3RequestDescriptor::StreamBuffer::request + * \brief Back pointer to the Camera3RequestDescriptor to which the StreamBuffer belongs + */ +Camera3RequestDescriptor::StreamBuffer::StreamBuffer( + CameraStream *cameraStream, const camera3_stream_buffer_t &buffer, + Camera3RequestDescriptor *requestDescriptor) + : stream(cameraStream), camera3Buffer(buffer.buffer), + fence(buffer.acquire_fence), request(requestDescriptor) +{ +} + +Camera3RequestDescriptor::StreamBuffer::~StreamBuffer() = default; + +Camera3RequestDescriptor::StreamBuffer::StreamBuffer(StreamBuffer &&) = default; + +Camera3RequestDescriptor::StreamBuffer & +Camera3RequestDescriptor::StreamBuffer::operator=(Camera3RequestDescriptor::StreamBuffer &&) = default; diff --git a/spider-cam/libcamera/src/android/camera_request.h b/spider-cam/libcamera/src/android/camera_request.h new file mode 100644 index 0000000..5b47918 --- /dev/null +++ b/spider-cam/libcamera/src/android/camera_request.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019-2021, Google Inc. 
+ * + * libcamera Android Camera Request Descriptor + */ + +#pragma once + +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include + +#include "camera_metadata.h" +#include "hal_framebuffer.h" + +class CameraBuffer; +class CameraStream; + +class Camera3RequestDescriptor +{ +public: + enum class Status { + Success, + Error, + }; + + struct StreamBuffer { + StreamBuffer(CameraStream *stream, + const camera3_stream_buffer_t &buffer, + Camera3RequestDescriptor *request); + ~StreamBuffer(); + + StreamBuffer(StreamBuffer &&); + StreamBuffer &operator=(StreamBuffer &&); + + CameraStream *stream; + buffer_handle_t *camera3Buffer; + std::unique_ptr frameBuffer; + libcamera::UniqueFD fence; + Status status = Status::Success; + libcamera::FrameBuffer *internalBuffer = nullptr; + const libcamera::FrameBuffer *srcBuffer = nullptr; + std::unique_ptr dstBuffer; + Camera3RequestDescriptor *request; + + private: + LIBCAMERA_DISABLE_COPY(StreamBuffer) + }; + + /* Keeps track of streams requiring post-processing. */ + std::map pendingStreamsToProcess_ + LIBCAMERA_TSA_GUARDED_BY(streamsProcessMutex_); + libcamera::Mutex streamsProcessMutex_; + + Camera3RequestDescriptor(libcamera::Camera *camera, + const camera3_capture_request_t *camera3Request); + ~Camera3RequestDescriptor(); + + bool isPending() const { return !complete_; } + + uint32_t frameNumber_ = 0; + + std::vector buffers_; + + CameraMetadata settings_; + std::unique_ptr request_; + std::unique_ptr resultMetadata_; + + bool complete_ = false; + Status status_ = Status::Success; + +private: + LIBCAMERA_DISABLE_COPY(Camera3RequestDescriptor) +}; diff --git a/spider-cam/libcamera/src/android/camera_stream.cpp b/spider-cam/libcamera/src/android/camera_stream.cpp new file mode 100644 index 0000000..1d68540 --- /dev/null +++ b/spider-cam/libcamera/src/android/camera_stream.cpp @@ -0,0 +1,341 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Google Inc. + * + * Camera HAL stream + */ + +#include "camera_stream.h" + +#include +#include +#include +#include +#include + +#include + +#include "jpeg/post_processor_jpeg.h" +#include "yuv/post_processor_yuv.h" + +#include "camera_buffer.h" +#include "camera_capabilities.h" +#include "camera_device.h" +#include "camera_metadata.h" +#include "frame_buffer_allocator.h" +#include "post_processor.h" + +using namespace libcamera; + +LOG_DECLARE_CATEGORY(HAL) + +/* + * \class CameraStream + * \brief Map a camera3_stream_t to a StreamConfiguration + * + * The CameraStream class maps a camera3_stream_t provided by Android + * camera framework to a libcamera::StreamConfiguration. + * + * The StreamConfiguration is represented by its index as recorded in the + * CameraConfiguration and not by pointer as StreamConfiguration is subject to + * relocation. + * + * A single StreamConfiguration may be used to deliver one or more streams to + * the Android framework. The mapping type between a camera3 stream to a + * StreamConfiguration is described by the CameraStream::Type. + * + * CameraStream handles all the aspects of producing a stream with the size + * and format requested by the camera3 stream from the data produced by + * the associated libcamera::Stream, including the creation of the encoder + * and buffer allocation. 
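+ *
+ * As a rough summary (drawn from configure() below, not upstream prose):
+ *
+ *   Direct   - no post-processor, frames are delivered as-is
+ *   Internal - post-processor plus internally allocated source buffers
+ *   Mapped   - post-processor fed from another stream's buffers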
+ */ + +CameraStream::CameraStream(CameraDevice *const cameraDevice, + CameraConfiguration *config, Type type, + camera3_stream_t *camera3Stream, + CameraStream *const sourceStream, unsigned int index) + : cameraDevice_(cameraDevice), config_(config), type_(type), + camera3Stream_(camera3Stream), sourceStream_(sourceStream), + index_(index) +{ +} + +CameraStream::CameraStream(CameraStream &&other) = default; + +CameraStream::~CameraStream() +{ + /* + * Manually delete buffers and then the allocator to make sure buffers + * are released while the allocator is still valid. + */ + allocatedBuffers_.clear(); + allocator_.reset(); +} + +const StreamConfiguration &CameraStream::configuration() const +{ + return config_->at(index_); +} + +Stream *CameraStream::stream() const +{ + return configuration().stream(); +} + +int CameraStream::configure() +{ + if (type_ == Type::Internal || type_ == Type::Mapped) { + const PixelFormat outFormat = + cameraDevice_->capabilities()->toPixelFormat(camera3Stream_->format); + StreamConfiguration output = configuration(); + output.pixelFormat = outFormat; + output.size.width = camera3Stream_->width; + output.size.height = camera3Stream_->height; + + switch (outFormat) { + case formats::NV12: + postProcessor_ = std::make_unique(); + break; + + case formats::MJPEG: + postProcessor_ = std::make_unique(cameraDevice_); + break; + + default: + LOG(HAL, Error) << "Unsupported format: " << outFormat; + return -EINVAL; + } + + int ret = postProcessor_->configure(configuration(), output); + if (ret) + return ret; + + worker_ = std::make_unique(postProcessor_.get()); + postProcessor_->processComplete.connect( + this, [&](Camera3RequestDescriptor::StreamBuffer *streamBuffer, + PostProcessor::Status status) { + Camera3RequestDescriptor::Status bufferStatus; + + if (status == PostProcessor::Status::Success) + bufferStatus = Camera3RequestDescriptor::Status::Success; + else + bufferStatus = Camera3RequestDescriptor::Status::Error; + + cameraDevice_->streamProcessingComplete(streamBuffer, + bufferStatus); + }); + + worker_->start(); + } + + allocator_ = std::make_unique(cameraDevice_); + mutex_ = std::make_unique(); + + camera3Stream_->max_buffers = configuration().bufferCount; + + return 0; +} + +int CameraStream::waitFence(int fence) +{ + /* + * \todo The implementation here is copied from camera_worker.cpp + * and both should be removed once libcamera is instrumented to handle + * fences waiting in the core. + * + * \todo Better characterize the timeout. Currently equal to the one + * used by the Rockchip Camera HAL on ChromeOS. + */ + constexpr unsigned int timeoutMs = 300; + struct pollfd fds = { fence, POLLIN, 0 }; + + do { + int ret = poll(&fds, 1, timeoutMs); + if (ret == 0) + return -ETIME; + + if (ret > 0) { + if (fds.revents & (POLLERR | POLLNVAL)) + return -EINVAL; + + return 0; + } + } while (errno == EINTR || errno == EAGAIN); + + return -errno; +} + +int CameraStream::process(Camera3RequestDescriptor::StreamBuffer *streamBuffer) +{ + ASSERT(type_ != Type::Direct); + + /* Handle waiting on fences on the destination buffer. 
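+ * waitFence() polls the fence fd for POLLIN with a 300 ms timeout and
+ * returns -ETIME on expiry; the fence is reset only after a successful
+ * wait, so ownership is left untouched on failure.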
*/ + if (streamBuffer->fence.isValid()) { + int ret = waitFence(streamBuffer->fence.get()); + if (ret < 0) { + LOG(HAL, Error) << "Failed waiting for fence: " + << streamBuffer->fence.get() << ": " + << strerror(-ret); + return ret; + } + + streamBuffer->fence.reset(); + } + + const StreamConfiguration &output = configuration(); + streamBuffer->dstBuffer = std::make_unique( + *streamBuffer->camera3Buffer, output.pixelFormat, output.size, + PROT_READ | PROT_WRITE); + if (!streamBuffer->dstBuffer->isValid()) { + LOG(HAL, Error) << "Failed to create destination buffer"; + return -EINVAL; + } + + worker_->queueRequest(streamBuffer); + + return 0; +} + +void CameraStream::flush() +{ + if (!postProcessor_) + return; + + worker_->flush(); +} + +FrameBuffer *CameraStream::getBuffer() +{ + if (!allocator_) + return nullptr; + + MutexLocker locker(*mutex_); + + if (buffers_.empty()) { + /* + * Use HAL_PIXEL_FORMAT_YCBCR_420_888 unconditionally. + * + * YCBCR_420 is the source format for both the JPEG and the YUV + * post-processors. + * + * \todo Store a reference to the format of the source stream + * instead of hardcoding. + */ + auto frameBuffer = allocator_->allocate(HAL_PIXEL_FORMAT_YCBCR_420_888, + configuration().size, + camera3Stream_->usage); + allocatedBuffers_.push_back(std::move(frameBuffer)); + buffers_.emplace_back(allocatedBuffers_.back().get()); + } + + FrameBuffer *buffer = buffers_.back(); + buffers_.pop_back(); + + return buffer; +} + +void CameraStream::putBuffer(FrameBuffer *buffer) +{ + if (!allocator_) + return; + + MutexLocker locker(*mutex_); + + buffers_.push_back(buffer); +} + +/** + * \class CameraStream::PostProcessorWorker + * \brief Post-process a CameraStream in an internal thread + * + * If the association between CameraStream and camera3_stream_t dictated by + * CameraStream::Type is internal or mapped, the stream is generated by post + * processing of a libcamera stream. Such a request is queued to a + * PostProcessorWorker in CameraStream::process(). A queue of post-processing + * requests is maintained by the PostProcessorWorker and it will run the + * post-processing on an internal thread as soon as any request is available on + * its queue. 
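+ *
+ * The worker is a textbook condition-variable consumer; run() below is
+ * essentially (sketch, not upstream prose):
+ *
+ *   while (true) {
+ *       cv_.wait(locker, [&] { return state_ != Running || !requests_.empty(); });
+ *       if (state_ != Running)
+ *           break;
+ *       take front request, unlock, postProcessor_->process(request), relock;
+ *   }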
+ */ +CameraStream::PostProcessorWorker::PostProcessorWorker(PostProcessor *postProcessor) + : postProcessor_(postProcessor) +{ +} + +CameraStream::PostProcessorWorker::~PostProcessorWorker() +{ + { + MutexLocker lock(mutex_); + state_ = State::Stopped; + } + + cv_.notify_one(); + wait(); +} + +void CameraStream::PostProcessorWorker::start() +{ + { + MutexLocker lock(mutex_); + ASSERT(state_ != State::Running); + state_ = State::Running; + } + + Thread::start(); +} + +void CameraStream::PostProcessorWorker::queueRequest(Camera3RequestDescriptor::StreamBuffer *dest) +{ + { + MutexLocker lock(mutex_); + ASSERT(state_ == State::Running); + requests_.push(dest); + } + + cv_.notify_one(); +} + +void CameraStream::PostProcessorWorker::run() +{ + MutexLocker locker(mutex_); + + while (1) { + cv_.wait(locker, [&]() LIBCAMERA_TSA_REQUIRES(mutex_) { + return state_ != State::Running || !requests_.empty(); + }); + + if (state_ != State::Running) + break; + + Camera3RequestDescriptor::StreamBuffer *streamBuffer = requests_.front(); + requests_.pop(); + locker.unlock(); + + postProcessor_->process(streamBuffer); + + locker.lock(); + } + + if (state_ == State::Flushing) { + std::queue requests = + std::move(requests_); + locker.unlock(); + + while (!requests.empty()) { + postProcessor_->processComplete.emit( + requests.front(), PostProcessor::Status::Error); + requests.pop(); + } + + locker.lock(); + state_ = State::Stopped; + } +} + +void CameraStream::PostProcessorWorker::flush() +{ + MutexLocker lock(mutex_); + state_ = State::Flushing; + lock.unlock(); + + cv_.notify_one(); +} diff --git a/spider-cam/libcamera/src/android/camera_stream.h b/spider-cam/libcamera/src/android/camera_stream.h new file mode 100644 index 0000000..395552d --- /dev/null +++ b/spider-cam/libcamera/src/android/camera_stream.h @@ -0,0 +1,187 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Google Inc. + * + * Camera HAL stream + */ + +#pragma once + +#include +#include +#include + +#include + +#include +#include + +#include +#include +#include +#include + +#include "camera_request.h" +#include "post_processor.h" + +class CameraDevice; +class PlatformFrameBufferAllocator; + +class CameraStream +{ +public: + /* + * Enumeration of CameraStream types. + * + * A camera stream associates an Android stream to a libcamera stream. + * This enumeration describes how the two streams are associated and how + * and where data produced from libcamera are delivered to the + * Android framework. + * + * Direct: + * + * The Android stream is directly mapped onto a libcamera stream: frames + * are delivered by the library directly in the memory location + * specified by the Android stream (buffer_handle_t->data) and provided + * to the framework as they are. The Android stream characteristics are + * directly translated to the libcamera stream configuration. + * + * +-----+ +-----+ + * | A | | L | + * +-----+ +-----+ + * | | + * V V + * +-----+ +------+ + * | B |<---------------| FB | + * +-----+ +------+ + * + * + * Internal: + * + * Data for the Android stream is produced by processing a libcamera + * stream created by the HAL for that purpose. The libcamera stream + * needs to be supplied with intermediate buffers where the library + * delivers frames to be processed and then provided to the framework. 
+ * The libcamera stream configuration is not a direct translation of the + * Android stream characteristics, but it describes the format and size + * required for the processing procedure to produce frames in the + * Android required format. + * + * +-----+ +-----+ + * | A | | L | + * +-----+ +-----+ + * | | + * V V + * +-----+ +------+ + * | B | | FB | + * +-----+ +------+ + * ^ | + * |-------Processing------| + * + * + * Mapped: + * + * Data for the Android stream is produced by processing a libcamera + * stream associated with another CameraStream. Mapped camera streams do + * not need any memory to be reserved for them as they process data + * produced by libcamera for a different stream whose format and size + * are compatible with the processing procedure requirements to produce + * frames in the Android required format. + * + * +-----+ +-----+ +-----+ + * | A | | A' | | L | + * +-----+ +-----+ +-----+ + * | | | + * V V V + * +-----+ +-----+ +------+ + * | B | | B' |<---------| FB | + * +-----+ +-----+ +------+ + * ^ | + * |--Processing--| + * + * + * -------------------------------------------------------------------- + * A = Android stream + * L = libcamera stream + * B = memory buffer + * FB = libcamera FrameBuffer + * "Processing" = Frame processing procedure (Encoding, scaling etc) + */ + enum class Type { + Direct, + Internal, + Mapped, + }; + CameraStream(CameraDevice *const cameraDevice, + libcamera::CameraConfiguration *config, Type type, + camera3_stream_t *camera3Stream, + CameraStream *const sourceStream, + unsigned int index); + CameraStream(CameraStream &&other); + ~CameraStream(); + + Type type() const { return type_; } + camera3_stream_t *camera3Stream() const { return camera3Stream_; } + const libcamera::StreamConfiguration &configuration() const; + libcamera::Stream *stream() const; + CameraStream *sourceStream() const { return sourceStream_; } + + int configure(); + int process(Camera3RequestDescriptor::StreamBuffer *streamBuffer); + libcamera::FrameBuffer *getBuffer(); + void putBuffer(libcamera::FrameBuffer *buffer); + void flush(); + +private: + class PostProcessorWorker : public libcamera::Thread + { + public: + enum class State { + Stopped, + Running, + Flushing, + }; + + PostProcessorWorker(PostProcessor *postProcessor); + ~PostProcessorWorker(); + + void start(); + void queueRequest(Camera3RequestDescriptor::StreamBuffer *request); + void flush(); + + protected: + void run() override; + + private: + PostProcessor *postProcessor_; + + libcamera::Mutex mutex_; + libcamera::ConditionVariable cv_; + + std::queue requests_ + LIBCAMERA_TSA_GUARDED_BY(mutex_); + + State state_ LIBCAMERA_TSA_GUARDED_BY(mutex_) = State::Stopped; + }; + + int waitFence(int fence); + + CameraDevice *const cameraDevice_; + const libcamera::CameraConfiguration *config_; + const Type type_; + camera3_stream_t *camera3Stream_; + CameraStream *const sourceStream_; + const unsigned int index_; + + std::unique_ptr allocator_; + std::vector> allocatedBuffers_; + std::vector buffers_ LIBCAMERA_TSA_GUARDED_BY(mutex_); + /* + * The class has to be MoveConstructible as instances are stored in + * an std::vector in CameraDevice. 
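+ *
+ * libcamera::Mutex is neither copyable nor movable, so the mutex is held
+ * through a unique_ptr rather than by value; that indirection is what
+ * keeps CameraStream move-constructible.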
+ */ + std::unique_ptr mutex_; + std::unique_ptr postProcessor_; + + std::unique_ptr worker_; +}; diff --git a/spider-cam/libcamera/src/android/cros/camera3_hal.cpp b/spider-cam/libcamera/src/android/cros/camera3_hal.cpp new file mode 100644 index 0000000..6010a5a --- /dev/null +++ b/spider-cam/libcamera/src/android/cros/camera3_hal.cpp @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2021, Google Inc. + * + * cros-specific components of Android Camera HALv3 module + */ + +#include + +#include "../camera_hal_manager.h" +#include "../cros_mojo_token.h" + +static void set_up(cros::CameraMojoChannelManagerToken *token) +{ + gCrosMojoToken = token; +} + +static void tear_down() +{ + delete CameraHalManager::instance(); +} + +cros::cros_camera_hal_t CROS_CAMERA_EXPORT CROS_CAMERA_HAL_INFO_SYM = { + .set_up = set_up, + .tear_down = tear_down +}; diff --git a/spider-cam/libcamera/src/android/cros/meson.build b/spider-cam/libcamera/src/android/cros/meson.build new file mode 100644 index 0000000..35995dd --- /dev/null +++ b/spider-cam/libcamera/src/android/cros/meson.build @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: CC0-1.0 + +if get_option('android_platform') != 'cros' + subdir_done() +endif + +android_hal_sources += files([ + 'camera3_hal.cpp', +]) + +android_deps += dependency('libcros_camera') + +android_cpp_args += ['-DOS_CHROMEOS'] diff --git a/spider-cam/libcamera/src/android/cros_mojo_token.h b/spider-cam/libcamera/src/android/cros_mojo_token.h new file mode 100644 index 0000000..d0baa80 --- /dev/null +++ b/spider-cam/libcamera/src/android/cros_mojo_token.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2022, Google Inc. + * + * cros-specific mojo token + */ + +#pragma once + +#include + +inline cros::CameraMojoChannelManagerToken *gCrosMojoToken = nullptr; diff --git a/spider-cam/libcamera/src/android/data/nautilus/camera_hal.yaml b/spider-cam/libcamera/src/android/data/nautilus/camera_hal.yaml new file mode 100644 index 0000000..2105fcc --- /dev/null +++ b/spider-cam/libcamera/src/android/data/nautilus/camera_hal.yaml @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: CC0-1.0 + +cameras: + "\\_SB_.PCI0.I2C2.CAM0": + location: back + rotation: 0 + + "\\_SB_.PCI0.XHCI.RHUB.HS09-9:1.0-04f2:b647": + location: front + rotation: 0 diff --git a/spider-cam/libcamera/src/android/data/soraka/camera_hal.yaml b/spider-cam/libcamera/src/android/data/soraka/camera_hal.yaml new file mode 100644 index 0000000..d886af0 --- /dev/null +++ b/spider-cam/libcamera/src/android/data/soraka/camera_hal.yaml @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: CC0-1.0 + +cameras: + "\\_SB_.PCI0.I2C4.CAM1": + location: front + rotation: 0 + + "\\_SB_.PCI0.I2C2.CAM0": + location: back + rotation: 0 diff --git a/spider-cam/libcamera/src/android/frame_buffer_allocator.h b/spider-cam/libcamera/src/android/frame_buffer_allocator.h new file mode 100644 index 0000000..3e68641 --- /dev/null +++ b/spider-cam/libcamera/src/android/frame_buffer_allocator.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2021, Google Inc. + * + * Interface definition to allocate Frame buffer in + * platform dependent way. 
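+ *
+ * Each platform backend provides the Extensible Private implementation and
+ * expands the PUBLIC_FRAME_BUFFER_ALLOCATOR_IMPLEMENTATION macro (defined
+ * below) to generate the identical public constructor, destructor and
+ * allocate() forwarding code.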
+ */ +#ifndef __ANDROID_FRAME_BUFFER_ALLOCATOR_H__ +#define __ANDROID_FRAME_BUFFER_ALLOCATOR_H__ + +#include + +#include + +#include +#include + +#include "hal_framebuffer.h" + +class CameraDevice; + +class PlatformFrameBufferAllocator : libcamera::Extensible +{ + LIBCAMERA_DECLARE_PRIVATE() + +public: + explicit PlatformFrameBufferAllocator(CameraDevice *const cameraDevice); + ~PlatformFrameBufferAllocator(); + + /* + * FrameBuffer owns the underlying buffer. Returns nullptr on failure. + * Note: The returned FrameBuffer needs to be destroyed before + * PlatformFrameBufferAllocator is destroyed. + */ + std::unique_ptr allocate( + int halPixelFormat, const libcamera::Size &size, uint32_t usage); +}; + +#define PUBLIC_FRAME_BUFFER_ALLOCATOR_IMPLEMENTATION \ +PlatformFrameBufferAllocator::PlatformFrameBufferAllocator( \ + CameraDevice *const cameraDevice) \ + : Extensible(std::make_unique(cameraDevice)) \ +{ \ +} \ +PlatformFrameBufferAllocator::~PlatformFrameBufferAllocator() \ +{ \ +} \ +std::unique_ptr \ +PlatformFrameBufferAllocator::allocate(int halPixelFormat, \ + const libcamera::Size &size, \ + uint32_t usage) \ +{ \ + return _d()->allocate(halPixelFormat, size, usage); \ +} + +#endif /* __ANDROID_FRAME_BUFFER_ALLOCATOR_H__ */ diff --git a/spider-cam/libcamera/src/android/hal_framebuffer.cpp b/spider-cam/libcamera/src/android/hal_framebuffer.cpp new file mode 100644 index 0000000..d4899f4 --- /dev/null +++ b/spider-cam/libcamera/src/android/hal_framebuffer.cpp @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2022, Google Inc. + * + * HAL Frame Buffer Handling + */ + +#include "hal_framebuffer.h" + +#include + +HALFrameBuffer::HALFrameBuffer(std::unique_ptr d, + buffer_handle_t handle) + : FrameBuffer(std::move(d)), handle_(handle) +{ +} + +HALFrameBuffer::HALFrameBuffer(const std::vector &planes, + buffer_handle_t handle) + : FrameBuffer(planes), handle_(handle) +{ +} diff --git a/spider-cam/libcamera/src/android/hal_framebuffer.h b/spider-cam/libcamera/src/android/hal_framebuffer.h new file mode 100644 index 0000000..cea49e2 --- /dev/null +++ b/spider-cam/libcamera/src/android/hal_framebuffer.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2022, Google Inc. + * + * HAL Frame Buffer Handling + */ + +#pragma once + +#include "libcamera/internal/framebuffer.h" + +#include + +class HALFrameBuffer final : public libcamera::FrameBuffer +{ +public: + HALFrameBuffer(std::unique_ptr d, + buffer_handle_t handle); + HALFrameBuffer(const std::vector &planes, + buffer_handle_t handle); + + buffer_handle_t handle() const { return handle_; } + +private: + buffer_handle_t handle_; +}; diff --git a/spider-cam/libcamera/src/android/jpeg/encoder.h b/spider-cam/libcamera/src/android/jpeg/encoder.h new file mode 100644 index 0000000..ed033c1 --- /dev/null +++ b/spider-cam/libcamera/src/android/jpeg/encoder.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Google Inc. 
+ * + * Image encoding interface + */ + +#pragma once + +#include + +#include +#include + +#include "../camera_request.h" + +class Encoder +{ +public: + virtual ~Encoder() = default; + + virtual int configure(const libcamera::StreamConfiguration &cfg) = 0; + virtual int encode(Camera3RequestDescriptor::StreamBuffer *buffer, + libcamera::Span exifData, + unsigned int quality) = 0; +}; diff --git a/spider-cam/libcamera/src/android/jpeg/encoder_jea.cpp b/spider-cam/libcamera/src/android/jpeg/encoder_jea.cpp new file mode 100644 index 0000000..25dc431 --- /dev/null +++ b/spider-cam/libcamera/src/android/jpeg/encoder_jea.cpp @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2022, Google Inc. + * + * JPEG encoding using CrOS JEA + */ + +#include "encoder_jea.h" + +#include "libcamera/internal/mapped_framebuffer.h" + +#include + +#include "../cros_mojo_token.h" +#include "../hal_framebuffer.h" + +EncoderJea::EncoderJea() = default; + +EncoderJea::~EncoderJea() = default; + +int EncoderJea::configure(const libcamera::StreamConfiguration &cfg) +{ + size_ = cfg.size; + + if (jpegCompressor_) + return 0; + + if (gCrosMojoToken == nullptr) + return -ENOTSUP; + + jpegCompressor_ = cros::JpegCompressor::GetInstance(gCrosMojoToken); + + return 0; +} + +int EncoderJea::encode(Camera3RequestDescriptor::StreamBuffer *buffer, + libcamera::Span exifData, + unsigned int quality) +{ + if (!jpegCompressor_) + return -ENOTSUP; + + uint32_t outDataSize = 0; + const HALFrameBuffer *fb = + dynamic_cast(buffer->srcBuffer); + + if (!jpegCompressor_->CompressImageFromHandle(fb->handle(), + *buffer->camera3Buffer, + size_.width, size_.height, + quality, exifData.data(), + exifData.size(), + &outDataSize)) + return -EBUSY; + + return outDataSize; +} diff --git a/spider-cam/libcamera/src/android/jpeg/encoder_jea.h b/spider-cam/libcamera/src/android/jpeg/encoder_jea.h new file mode 100644 index 0000000..91115d2 --- /dev/null +++ b/spider-cam/libcamera/src/android/jpeg/encoder_jea.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2022, Google Inc. + * + * JPEG encoding using CrOS JEA + */ + +#pragma once + +#include + +#include + +#include "encoder.h" + +class EncoderJea : public Encoder +{ +public: + EncoderJea(); + ~EncoderJea(); + + int configure(const libcamera::StreamConfiguration &cfg) override; + int encode(Camera3RequestDescriptor::StreamBuffer *buffer, + libcamera::Span exifData, + unsigned int quality) override; + +private: + libcamera::Size size_; + + std::unique_ptr jpegCompressor_; +}; diff --git a/spider-cam/libcamera/src/android/jpeg/encoder_libjpeg.cpp b/spider-cam/libcamera/src/android/jpeg/encoder_libjpeg.cpp new file mode 100644 index 0000000..7fc6287 --- /dev/null +++ b/spider-cam/libcamera/src/android/jpeg/encoder_libjpeg.cpp @@ -0,0 +1,239 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Google Inc. 
+ * + * JPEG encoding using libjpeg native API + */ + +#include "encoder_libjpeg.h" + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +#include "libcamera/internal/formats.h" +#include "libcamera/internal/mapped_framebuffer.h" + +#include "../camera_buffer.h" + +using namespace libcamera; + +LOG_DECLARE_CATEGORY(JPEG) + +namespace { + +struct JPEGPixelFormatInfo { + J_COLOR_SPACE colorSpace; + const PixelFormatInfo &pixelFormatInfo; + bool nvSwap; +}; + +const std::map pixelInfo{ + { formats::R8, { JCS_GRAYSCALE, PixelFormatInfo::info(formats::R8), false } }, + + { formats::RGB888, { JCS_EXT_BGR, PixelFormatInfo::info(formats::RGB888), false } }, + { formats::BGR888, { JCS_EXT_RGB, PixelFormatInfo::info(formats::BGR888), false } }, + + { formats::NV12, { JCS_YCbCr, PixelFormatInfo::info(formats::NV12), false } }, + { formats::NV21, { JCS_YCbCr, PixelFormatInfo::info(formats::NV21), true } }, + { formats::NV16, { JCS_YCbCr, PixelFormatInfo::info(formats::NV16), false } }, + { formats::NV61, { JCS_YCbCr, PixelFormatInfo::info(formats::NV61), true } }, + { formats::NV24, { JCS_YCbCr, PixelFormatInfo::info(formats::NV24), false } }, + { formats::NV42, { JCS_YCbCr, PixelFormatInfo::info(formats::NV42), true } }, +}; + +const struct JPEGPixelFormatInfo &findPixelInfo(const PixelFormat &format) +{ + static const struct JPEGPixelFormatInfo invalidPixelFormat { + JCS_UNKNOWN, PixelFormatInfo(), false + }; + + const auto iter = pixelInfo.find(format); + if (iter == pixelInfo.end()) { + LOG(JPEG, Error) << "Unsupported pixel format for JPEG encoder: " + << format; + return invalidPixelFormat; + } + + return iter->second; +} + +} /* namespace */ + +EncoderLibJpeg::EncoderLibJpeg() +{ + /* \todo Expand error handling coverage with a custom handler. */ + compress_.err = jpeg_std_error(&jerr_); + + jpeg_create_compress(&compress_); +} + +EncoderLibJpeg::~EncoderLibJpeg() +{ + jpeg_destroy_compress(&compress_); +} + +int EncoderLibJpeg::configure(const StreamConfiguration &cfg) +{ + const struct JPEGPixelFormatInfo info = findPixelInfo(cfg.pixelFormat); + if (info.colorSpace == JCS_UNKNOWN) + return -ENOTSUP; + + compress_.image_width = cfg.size.width; + compress_.image_height = cfg.size.height; + compress_.in_color_space = info.colorSpace; + + compress_.input_components = info.colorSpace == JCS_GRAYSCALE ? 1 : 3; + + jpeg_set_defaults(&compress_); + + pixelFormatInfo_ = &info.pixelFormatInfo; + + nv_ = pixelFormatInfo_->numPlanes() == 2; + nvSwap_ = info.nvSwap; + + return 0; +} + +void EncoderLibJpeg::compressRGB(const std::vector> &planes) +{ + unsigned char *src = const_cast(planes[0].data()); + /* \todo Stride information should come from buffer configuration. */ + unsigned int stride = pixelFormatInfo_->stride(compress_.image_width, 0); + + JSAMPROW row_pointer[1]; + + while (compress_.next_scanline < compress_.image_height) { + row_pointer[0] = &src[compress_.next_scanline * stride]; + jpeg_write_scanlines(&compress_, row_pointer, 1); + } +} + +/* + * Compress the incoming buffer from a supported NV format. + * This naively unpacks the semi-planar NV12 to a YUV888 format for libjpeg. + */ +void EncoderLibJpeg::compressNV(const std::vector> &planes) +{ + uint8_t tmprowbuf[compress_.image_width * 3]; + + /* + * \todo Use the raw api, and only unpack the cb/cr samples to new line + * buffers. If possible, see if we can set appropriate pixel strides + * too to save even that copy. 
+ * + * Possible hints at: + * https://sourceforge.net/p/libjpeg/mailman/message/30815123/ + */ + unsigned int y_stride = pixelFormatInfo_->stride(compress_.image_width, 0); + unsigned int c_stride = pixelFormatInfo_->stride(compress_.image_width, 1); + + unsigned int horzSubSample = 2 * compress_.image_width / c_stride; + unsigned int vertSubSample = pixelFormatInfo_->planes[1].verticalSubSampling; + + unsigned int c_inc = horzSubSample == 1 ? 2 : 0; + unsigned int cb_pos = nvSwap_ ? 1 : 0; + unsigned int cr_pos = nvSwap_ ? 0 : 1; + + const unsigned char *src = planes[0].data(); + const unsigned char *src_c = planes[1].data(); + + JSAMPROW row_pointer[1]; + row_pointer[0] = &tmprowbuf[0]; + + for (unsigned int y = 0; y < compress_.image_height; y++) { + unsigned char *dst = &tmprowbuf[0]; + + const unsigned char *src_y = src + y * y_stride; + const unsigned char *src_cb = src_c + (y / vertSubSample) * c_stride + cb_pos; + const unsigned char *src_cr = src_c + (y / vertSubSample) * c_stride + cr_pos; + + for (unsigned int x = 0; x < compress_.image_width; x += 2) { + dst[0] = *src_y; + dst[1] = *src_cb; + dst[2] = *src_cr; + src_y++; + src_cb += c_inc; + src_cr += c_inc; + dst += 3; + + dst[0] = *src_y; + dst[1] = *src_cb; + dst[2] = *src_cr; + src_y++; + src_cb += 2; + src_cr += 2; + dst += 3; + } + + jpeg_write_scanlines(&compress_, row_pointer, 1); + } +} + +int EncoderLibJpeg::encode(Camera3RequestDescriptor::StreamBuffer *buffer, + libcamera::Span exifData, + unsigned int quality) +{ + MappedFrameBuffer frame(buffer->srcBuffer, + MappedFrameBuffer::MapFlag::Read); + if (!frame.isValid()) { + LOG(JPEG, Error) << "Failed to map FrameBuffer : " + << strerror(frame.error()); + return frame.error(); + } + + return encode(frame.planes(), buffer->dstBuffer->plane(0), + exifData, quality); +} + +int EncoderLibJpeg::encode(const std::vector> &src, + Span dest, Span exifData, + unsigned int quality) +{ + unsigned char *destination = dest.data(); + unsigned long size = dest.size(); + + jpeg_set_quality(&compress_, quality, TRUE); + + /* + * The jpeg_mem_dest will reallocate if the required size is not + * sufficient. That means the output won't be written to the correct + * buffers. + * + * \todo Implement our own custom memory destination to prevent + * reallocation and prefer failure with correct reporting. + */ + jpeg_mem_dest(&compress_, &destination, &size); + + jpeg_start_compress(&compress_, TRUE); + + if (exifData.size()) + /* Store Exif data in the JPEG_APP1 data block. */ + jpeg_write_marker(&compress_, JPEG_APP0 + 1, + static_cast(exifData.data()), + exifData.size()); + + LOG(JPEG, Debug) << "JPEG Encode Starting:" << compress_.image_width + << "x" << compress_.image_height; + + ASSERT(src.size() == pixelFormatInfo_->numPlanes()); + + if (nv_) + compressNV(src); + else + compressRGB(src); + + jpeg_finish_compress(&compress_); + + return size; +} diff --git a/spider-cam/libcamera/src/android/jpeg/encoder_libjpeg.h b/spider-cam/libcamera/src/android/jpeg/encoder_libjpeg.h new file mode 100644 index 0000000..4ac85c2 --- /dev/null +++ b/spider-cam/libcamera/src/android/jpeg/encoder_libjpeg.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Google Inc. 
+ * + * JPEG encoding using libjpeg + */ + +#pragma once + +#include "encoder.h" + +#include + +#include "libcamera/internal/formats.h" + +#include + +class EncoderLibJpeg : public Encoder +{ +public: + EncoderLibJpeg(); + ~EncoderLibJpeg(); + + int configure(const libcamera::StreamConfiguration &cfg) override; + int encode(Camera3RequestDescriptor::StreamBuffer *buffer, + libcamera::Span exifData, + unsigned int quality) override; + int encode(const std::vector> &planes, + libcamera::Span destination, + libcamera::Span exifData, + unsigned int quality); + +private: + void compressRGB(const std::vector> &planes); + void compressNV(const std::vector> &planes); + + struct jpeg_compress_struct compress_; + struct jpeg_error_mgr jerr_; + + const libcamera::PixelFormatInfo *pixelFormatInfo_; + + bool nv_; + bool nvSwap_; +}; diff --git a/spider-cam/libcamera/src/android/jpeg/exif.cpp b/spider-cam/libcamera/src/android/jpeg/exif.cpp new file mode 100644 index 0000000..b8c871d --- /dev/null +++ b/spider-cam/libcamera/src/android/jpeg/exif.cpp @@ -0,0 +1,522 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Google Inc. + * + * EXIF tag creation using libexif + */ + +#include "exif.h" + +#include +#include +#include +#include +#include +#include + +#include +#include + +using namespace libcamera; + +LOG_DEFINE_CATEGORY(EXIF) + +/* + * List of EXIF tags that we set directly because they are not supported + * by libexif version 0.6.21. + */ +enum class _ExifTag { + OFFSET_TIME = 0x9010, + OFFSET_TIME_ORIGINAL = 0x9011, + OFFSET_TIME_DIGITIZED = 0x9012, +}; + +/* + * The Exif class should be instantiated and specific properties set + * through the exposed public API. + * + * Once all desired properties have been set, the user shall call + * generate() to process the entries and generate the Exif data. + * + * Calls to generate() must check the return code to determine if any error + * occurred during the construction of the Exif data, and if successful the + * data can be obtained using the data() function. + */ +Exif::Exif() + : valid_(false), data_(nullptr), order_(EXIF_BYTE_ORDER_INTEL), + exifData_(0), size_(0) +{ + /* Create an ExifMem allocator to construct entries. */ + mem_ = exif_mem_new_default(); + if (!mem_) { + LOG(EXIF, Error) << "Failed to allocate ExifMem Allocator"; + return; + } + + data_ = exif_data_new_mem(mem_); + if (!data_) { + LOG(EXIF, Error) << "Failed to allocate an ExifData structure"; + return; + } + + valid_ = true; + + exif_data_set_option(data_, EXIF_DATA_OPTION_FOLLOW_SPECIFICATION); + exif_data_set_data_type(data_, EXIF_DATA_TYPE_COMPRESSED); + + /* + * Big-Endian: EXIF_BYTE_ORDER_MOTOROLA + * Little Endian: EXIF_BYTE_ORDER_INTEL + */ + exif_data_set_byte_order(data_, order_); + + setString(EXIF_IFD_EXIF, EXIF_TAG_EXIF_VERSION, + EXIF_FORMAT_UNDEFINED, "0231"); + + /* Create the mandatory EXIF fields with default data. */ + exif_data_fix(data_); +} + +Exif::~Exif() +{ + if (exifData_) + free(exifData_); + + if (data_) { + /* + * Reset thumbnail data to avoid getting double-freed by + * libexif. It is owned by the caller (i.e. PostProcessorJpeg). 
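+ *
+ * Clearing data_->data and data_->size before exif_data_unref() keeps
+ * libexif from freeing that caller-owned thumbnail buffer.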
+ */ + data_->data = nullptr; + data_->size = 0; + + exif_data_unref(data_); + } + + if (mem_) + exif_mem_unref(mem_); +} + +ExifEntry *Exif::createEntry(ExifIfd ifd, ExifTag tag) +{ + ExifContent *content = data_->ifd[ifd]; + ExifEntry *entry = exif_content_get_entry(content, tag); + + if (entry) { + exif_entry_ref(entry); + return entry; + } + + entry = exif_entry_new_mem(mem_); + if (!entry) { + LOG(EXIF, Error) << "Failed to allocated new entry"; + valid_ = false; + return nullptr; + } + + exif_content_add_entry(content, entry); + exif_entry_initialize(entry, tag); + + return entry; +} + +ExifEntry *Exif::createEntry(ExifIfd ifd, ExifTag tag, ExifFormat format, + unsigned long components, unsigned int size) +{ + ExifContent *content = data_->ifd[ifd]; + + /* Replace any existing entry with the same tag. */ + ExifEntry *existing = exif_content_get_entry(content, tag); + exif_content_remove_entry(content, existing); + + ExifEntry *entry = exif_entry_new_mem(mem_); + if (!entry) { + LOG(EXIF, Error) << "Failed to allocated new entry"; + valid_ = false; + return nullptr; + } + + void *buffer = exif_mem_alloc(mem_, size); + if (!buffer) { + LOG(EXIF, Error) << "Failed to allocate buffer for variable entry"; + exif_mem_unref(mem_); + valid_ = false; + return nullptr; + } + + entry->data = static_cast(buffer); + entry->components = components; + entry->format = format; + entry->size = size; + entry->tag = tag; + + exif_content_add_entry(content, entry); + + return entry; +} + +void Exif::setByte(ExifIfd ifd, ExifTag tag, uint8_t item) +{ + ExifEntry *entry = createEntry(ifd, tag, EXIF_FORMAT_BYTE, 1, 1); + if (!entry) + return; + + entry->data[0] = item; + exif_entry_unref(entry); +} + +void Exif::setShort(ExifIfd ifd, ExifTag tag, uint16_t item) +{ + ExifEntry *entry = createEntry(ifd, tag); + if (!entry) + return; + + exif_set_short(entry->data, order_, item); + exif_entry_unref(entry); +} + +void Exif::setLong(ExifIfd ifd, ExifTag tag, uint32_t item) +{ + ExifEntry *entry = createEntry(ifd, tag); + if (!entry) + return; + + exif_set_long(entry->data, order_, item); + exif_entry_unref(entry); +} + +void Exif::setRational(ExifIfd ifd, ExifTag tag, ExifRational item) +{ + setRational(ifd, tag, { &item, 1 }); +} + +void Exif::setRational(ExifIfd ifd, ExifTag tag, Span items) +{ + ExifEntry *entry = createEntry(ifd, tag, EXIF_FORMAT_RATIONAL, + items.size(), + items.size() * sizeof(ExifRational)); + if (!entry) + return; + + for (size_t i = 0; i < items.size(); i++) + exif_set_rational(entry->data + i * sizeof(ExifRational), + order_, items[i]); + exif_entry_unref(entry); +} + +static const std::map> stringEncodingCodes = { + { Exif::ASCII, { 0x41, 0x53, 0x43, 0x49, 0x49, 0x00, 0x00, 0x00 } }, + { Exif::Unicode, { 0x55, 0x4e, 0x49, 0x43, 0x4f, 0x44, 0x45, 0x00 } }, +}; + +void Exif::setString(ExifIfd ifd, ExifTag tag, ExifFormat format, + const std::string &item, StringEncoding encoding) +{ + std::string ascii; + size_t length; + const char *str; + std::vector buf; + + if (format == EXIF_FORMAT_ASCII) { + ascii = utils::toAscii(item); + str = ascii.c_str(); + + /* Pad 1 extra byte to null-terminate the ASCII string. 
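+		 * For example, a three-character string occupies four bytes,
+		 * the last one being '\0'.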
*/ + length = ascii.length() + 1; + } else { + std::u16string u16str; + + auto encodingString = stringEncodingCodes.find(encoding); + if (encodingString != stringEncodingCodes.end()) { + buf = { + encodingString->second.begin(), + encodingString->second.end() + }; + } + + switch (encoding) { + case Unicode: + u16str = utf8ToUtf16(item); + + buf.resize(8 + u16str.size() * 2); + for (size_t i = 0; i < u16str.size(); i++) { + if (order_ == EXIF_BYTE_ORDER_INTEL) { + buf[8 + 2 * i] = u16str[i] & 0xff; + buf[8 + 2 * i + 1] = (u16str[i] >> 8) & 0xff; + } else { + buf[8 + 2 * i] = (u16str[i] >> 8) & 0xff; + buf[8 + 2 * i + 1] = u16str[i] & 0xff; + } + } + + break; + + case ASCII: + case NoEncoding: + buf.insert(buf.end(), item.begin(), item.end()); + break; + } + + str = reinterpret_cast(buf.data()); + + /* + * Strings stored in different formats (EXIF_FORMAT_UNDEFINED) + * are not null-terminated. + */ + length = buf.size(); + } + + ExifEntry *entry = createEntry(ifd, tag, format, length, length); + if (!entry) + return; + + memcpy(entry->data, str, length); + exif_entry_unref(entry); +} + +void Exif::setMake(const std::string &make) +{ + setString(EXIF_IFD_0, EXIF_TAG_MAKE, EXIF_FORMAT_ASCII, make); +} + +void Exif::setModel(const std::string &model) +{ + setString(EXIF_IFD_0, EXIF_TAG_MODEL, EXIF_FORMAT_ASCII, model); +} + +void Exif::setSize(const Size &size) +{ + setLong(EXIF_IFD_EXIF, EXIF_TAG_PIXEL_Y_DIMENSION, size.height); + setLong(EXIF_IFD_EXIF, EXIF_TAG_PIXEL_X_DIMENSION, size.width); +} + +void Exif::setTimestamp(time_t timestamp, std::chrono::milliseconds msec) +{ + struct tm tm; + localtime_r(×tamp, &tm); + + char str[20]; + strftime(str, sizeof(str), "%Y:%m:%d %H:%M:%S", &tm); + std::string ts(str); + + setString(EXIF_IFD_0, EXIF_TAG_DATE_TIME, EXIF_FORMAT_ASCII, ts); + setString(EXIF_IFD_EXIF, EXIF_TAG_DATE_TIME_ORIGINAL, EXIF_FORMAT_ASCII, ts); + setString(EXIF_IFD_EXIF, EXIF_TAG_DATE_TIME_DIGITIZED, EXIF_FORMAT_ASCII, ts); + + /* Query and set timezone information if available. 
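+	 * strftime("%z") yields an offset such as "+0530"; the ':' inserted
+	 * below turns it into the "+05:30" form expected by the OffsetTime
+	 * tags.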
*/ + int r = strftime(str, sizeof(str), "%z", &tm); + if (r <= 0) + return; + + std::string tz(str); + tz.insert(3, 1, ':'); + setString(EXIF_IFD_EXIF, + static_cast(_ExifTag::OFFSET_TIME), + EXIF_FORMAT_ASCII, tz); + setString(EXIF_IFD_EXIF, + static_cast(_ExifTag::OFFSET_TIME_ORIGINAL), + EXIF_FORMAT_ASCII, tz); + setString(EXIF_IFD_EXIF, + static_cast(_ExifTag::OFFSET_TIME_DIGITIZED), + EXIF_FORMAT_ASCII, tz); + + std::stringstream sstr; + sstr << std::setfill('0') << std::setw(3) << msec.count(); + std::string subsec = sstr.str(); + + setString(EXIF_IFD_EXIF, EXIF_TAG_SUB_SEC_TIME, + EXIF_FORMAT_ASCII, subsec); + setString(EXIF_IFD_EXIF, EXIF_TAG_SUB_SEC_TIME_ORIGINAL, + EXIF_FORMAT_ASCII, subsec); + setString(EXIF_IFD_EXIF, EXIF_TAG_SUB_SEC_TIME_DIGITIZED, + EXIF_FORMAT_ASCII, subsec); +} + +void Exif::setGPSDateTimestamp(time_t timestamp) +{ + struct tm tm; + gmtime_r(×tamp, &tm); + + char str[11]; + strftime(str, sizeof(str), "%Y:%m:%d", &tm); + std::string tsStr(str); + + setString(EXIF_IFD_GPS, static_cast(EXIF_TAG_GPS_DATE_STAMP), + EXIF_FORMAT_ASCII, tsStr); + + /* Set GPS_TIME_STAMP */ + ExifRational ts[] = { + { static_cast(tm.tm_hour), 1 }, + { static_cast(tm.tm_min), 1 }, + { static_cast(tm.tm_sec), 1 }, + }; + + setRational(EXIF_IFD_GPS, static_cast(EXIF_TAG_GPS_TIME_STAMP), + ts); +} + +std::tuple Exif::degreesToDMS(double decimalDegrees) +{ + int degrees = std::trunc(decimalDegrees); + double minutes = std::abs((decimalDegrees - degrees) * 60); + double seconds = (minutes - std::trunc(minutes)) * 60; + + return { degrees, std::trunc(minutes), std::round(seconds) }; +} + +void Exif::setGPSDMS(ExifIfd ifd, ExifTag tag, int deg, int min, int sec) +{ + ExifRational coords[] = { + { static_cast(deg), 1 }, + { static_cast(min), 1 }, + { static_cast(sec), 1 }, + }; + + setRational(ifd, tag, coords); +} + +/* + * \brief Set GPS location (lat, long, alt) + * \param[in] coords Pointer to coordinates latitude, longitude, and altitude, + * first two in degrees, the third in meters + */ +void Exif::setGPSLocation(const double *coords) +{ + int deg, min, sec; + + std::tie(deg, min, sec) = degreesToDMS(coords[0]); + setString(EXIF_IFD_GPS, static_cast(EXIF_TAG_GPS_LATITUDE_REF), + EXIF_FORMAT_ASCII, deg >= 0 ? "N" : "S"); + setGPSDMS(EXIF_IFD_GPS, static_cast(EXIF_TAG_GPS_LATITUDE), + std::abs(deg), min, sec); + + std::tie(deg, min, sec) = degreesToDMS(coords[1]); + setString(EXIF_IFD_GPS, static_cast(EXIF_TAG_GPS_LONGITUDE_REF), + EXIF_FORMAT_ASCII, deg >= 0 ? "E" : "W"); + setGPSDMS(EXIF_IFD_GPS, static_cast(EXIF_TAG_GPS_LONGITUDE), + std::abs(deg), min, sec); + + setByte(EXIF_IFD_GPS, static_cast(EXIF_TAG_GPS_ALTITUDE_REF), + coords[2] >= 0 ? 
0 : 1); + setRational(EXIF_IFD_GPS, static_cast(EXIF_TAG_GPS_ALTITUDE), + ExifRational{ static_cast(std::abs(coords[2])), 1 }); +} + +void Exif::setGPSMethod(const std::string &method) +{ + setString(EXIF_IFD_GPS, static_cast(EXIF_TAG_GPS_PROCESSING_METHOD), + EXIF_FORMAT_UNDEFINED, method, NoEncoding); +} + +void Exif::setOrientation(int orientation) +{ + int value; + switch (orientation) { + case 0: + default: + value = 1; + break; + case 90: + value = 6; + break; + case 180: + value = 3; + break; + case 270: + value = 8; + break; + } + + setShort(EXIF_IFD_0, EXIF_TAG_ORIENTATION, value); +} + +void Exif::setThumbnail(std::vector &&thumbnail, + Compression compression) +{ + thumbnailData_ = std::move(thumbnail); + + data_->data = thumbnailData_.data(); + data_->size = thumbnailData_.size(); + + setShort(EXIF_IFD_0, EXIF_TAG_COMPRESSION, compression); +} + +void Exif::setFocalLength(float length) +{ + ExifRational rational = { static_cast(length * 1000), 1000 }; + setRational(EXIF_IFD_EXIF, EXIF_TAG_FOCAL_LENGTH, rational); +} + +void Exif::setExposureTime(uint64_t nsec) +{ + ExifRational rational = { static_cast(nsec), 1000000000 }; + setRational(EXIF_IFD_EXIF, EXIF_TAG_EXPOSURE_TIME, rational); +} + +void Exif::setAperture(float size) +{ + ExifRational rational = { static_cast(size * 10000), 10000 }; + setRational(EXIF_IFD_EXIF, EXIF_TAG_FNUMBER, rational); +} + +void Exif::setISO(uint16_t iso) +{ + setShort(EXIF_IFD_EXIF, EXIF_TAG_ISO_SPEED_RATINGS, iso); +} + +void Exif::setFlash(Flash flash) +{ + setShort(EXIF_IFD_EXIF, EXIF_TAG_FLASH, static_cast(flash)); +} + +void Exif::setWhiteBalance(WhiteBalance wb) +{ + setShort(EXIF_IFD_EXIF, EXIF_TAG_WHITE_BALANCE, static_cast(wb)); +} + +/** + * \brief Convert UTF-8 string to UTF-16 string + * \param[in] str String to convert + * + * \return \a str in UTF-16 + */ +std::u16string Exif::utf8ToUtf16(const std::string &str) +{ + mbstate_t state{}; + char16_t c16; + const char *ptr = str.data(); + const char *end = ptr + str.size(); + + std::u16string ret; + while (size_t rc = mbrtoc16(&c16, ptr, end - ptr + 1, &state)) { + if (rc == static_cast(-2) || + rc == static_cast(-1)) + break; + + ret.push_back(c16); + + if (rc > 0) + ptr += rc; + } + + return ret; +} + +[[nodiscard]] int Exif::generate() +{ + if (exifData_) { + free(exifData_); + exifData_ = nullptr; + } + + if (!valid_) { + LOG(EXIF, Error) << "Generated EXIF data is invalid"; + return -1; + } + + exif_data_save_data(data_, &exifData_, &size_); + + LOG(EXIF, Debug) << "Created EXIF instance (" << size_ << " bytes)"; + + return 0; +} diff --git a/spider-cam/libcamera/src/android/jpeg/exif.h b/spider-cam/libcamera/src/android/jpeg/exif.h new file mode 100644 index 0000000..446d53f --- /dev/null +++ b/spider-cam/libcamera/src/android/jpeg/exif.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Google Inc. 
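+ *
+ * Typical usage, following the class documentation in exif.cpp: set the
+ * desired properties, call generate(), then fetch the blob with data().
+ * A sketch (the encoder call is illustrative, not part of this header):
+ *
+ *   Exif exif;
+ *   exif.setMake("make");
+ *   exif.setModel("model");
+ *   exif.setOrientation(90);
+ *   if (exif.generate() == 0)
+ *           encoder.encode(buffer, exif.data(), quality);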
+ * + * EXIF tag creator using libexif + */ + +#pragma once + +#include +#include +#include +#include + +#include + +#include + +#include + +class Exif +{ +public: + Exif(); + ~Exif(); + + enum Compression { + None = 1, + JPEG = 6, + }; + + enum Flash { + /* bit 0 */ + Fired = 0x01, + /* bits 1 and 2 */ + StrobeDetected = 0x04, + StrobeNotDetected = 0x06, + /* bits 3 and 4 */ + ModeCompulsoryFiring = 0x08, + ModeCompulsorySuppression = 0x10, + ModeAuto = 0x18, + /* bit 5 */ + FlashNotPresent = 0x20, + /* bit 6 */ + RedEye = 0x40, + }; + + enum WhiteBalance { + Auto = 0, + Manual = 1, + }; + + enum StringEncoding { + NoEncoding = 0, + ASCII = 1, + Unicode = 2, + }; + + void setMake(const std::string &make); + void setModel(const std::string &model); + + void setOrientation(int orientation); + void setSize(const libcamera::Size &size); + void setThumbnail(std::vector &&thumbnail, + Compression compression); + void setTimestamp(time_t timestamp, std::chrono::milliseconds msec); + + void setGPSDateTimestamp(time_t timestamp); + void setGPSLocation(const double *coords); + void setGPSMethod(const std::string &method); + + void setFocalLength(float length); + void setExposureTime(uint64_t nsec); + void setAperture(float size); + void setISO(uint16_t iso); + void setFlash(Flash flash); + void setWhiteBalance(WhiteBalance wb); + + libcamera::Span data() const { return { exifData_, size_ }; } + [[nodiscard]] int generate(); + +private: + ExifEntry *createEntry(ExifIfd ifd, ExifTag tag); + ExifEntry *createEntry(ExifIfd ifd, ExifTag tag, ExifFormat format, + unsigned long components, unsigned int size); + + void setByte(ExifIfd ifd, ExifTag tag, uint8_t item); + void setShort(ExifIfd ifd, ExifTag tag, uint16_t item); + void setLong(ExifIfd ifd, ExifTag tag, uint32_t item); + void setString(ExifIfd ifd, ExifTag tag, ExifFormat format, + const std::string &item, + StringEncoding encoding = NoEncoding); + void setRational(ExifIfd ifd, ExifTag tag, ExifRational item); + void setRational(ExifIfd ifd, ExifTag tag, + libcamera::Span items); + + std::tuple degreesToDMS(double decimalDegrees); + void setGPSDMS(ExifIfd ifd, ExifTag tag, int deg, int min, int sec); + + std::u16string utf8ToUtf16(const std::string &str); + + bool valid_; + + ExifData *data_; + ExifMem *mem_; + ExifByteOrder order_; + + unsigned char *exifData_; + unsigned int size_; + + std::vector thumbnailData_; +}; diff --git a/spider-cam/libcamera/src/android/jpeg/meson.build b/spider-cam/libcamera/src/android/jpeg/meson.build new file mode 100644 index 0000000..3402e61 --- /dev/null +++ b/spider-cam/libcamera/src/android/jpeg/meson.build @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: CC0-1.0 + +android_hal_sources += files([ + 'encoder_libjpeg.cpp', + 'exif.cpp', + 'post_processor_jpeg.cpp', + 'thumbnailer.cpp' +]) + +platform = get_option('android_platform') +if platform == 'cros' + android_hal_sources += files(['encoder_jea.cpp']) + android_deps += [dependency('libcros_camera')] +endif diff --git a/spider-cam/libcamera/src/android/jpeg/post_processor_jpeg.cpp b/spider-cam/libcamera/src/android/jpeg/post_processor_jpeg.cpp new file mode 100644 index 0000000..89b8a40 --- /dev/null +++ b/spider-cam/libcamera/src/android/jpeg/post_processor_jpeg.cpp @@ -0,0 +1,223 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Google Inc. 
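+ *
+ * Processing order, as implemented below: scale the source frame with
+ * Thumbnailer, JPEG-encode the thumbnail, embed it in the EXIF blob,
+ * encode the main image with that EXIF data attached, then append the
+ * camera3_jpeg_blob trailer the Android framework uses to locate the
+ * actual JPEG size inside the fixed-size buffer.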
+ * + * JPEG Post Processor + */ + +#include "post_processor_jpeg.h" + +#include + +#include "../camera_device.h" +#include "../camera_metadata.h" +#include "../camera_request.h" +#if defined(OS_CHROMEOS) +#include "encoder_jea.h" +#else /* !defined(OS_CHROMEOS) */ +#include "encoder_libjpeg.h" +#endif +#include "exif.h" + +#include + +#include + +using namespace libcamera; +using namespace std::chrono_literals; + +LOG_DEFINE_CATEGORY(JPEG) + +PostProcessorJpeg::PostProcessorJpeg(CameraDevice *const device) + : cameraDevice_(device) +{ +} + +int PostProcessorJpeg::configure(const StreamConfiguration &inCfg, + const StreamConfiguration &outCfg) +{ + if (inCfg.size != outCfg.size) { + LOG(JPEG, Error) << "Mismatch of input and output stream sizes"; + return -EINVAL; + } + + if (outCfg.pixelFormat != formats::MJPEG) { + LOG(JPEG, Error) << "Output stream pixel format is not JPEG"; + return -EINVAL; + } + + streamSize_ = outCfg.size; + + thumbnailer_.configure(inCfg.size, inCfg.pixelFormat); + +#if defined(OS_CHROMEOS) + encoder_ = std::make_unique(); +#else /* !defined(OS_CHROMEOS) */ + encoder_ = std::make_unique(); +#endif + + return encoder_->configure(inCfg); +} + +void PostProcessorJpeg::generateThumbnail(const FrameBuffer &source, + const Size &targetSize, + unsigned int quality, + std::vector *thumbnail) +{ + /* Stores the raw scaled-down thumbnail bytes. */ + std::vector rawThumbnail; + + thumbnailer_.createThumbnail(source, targetSize, &rawThumbnail); + + StreamConfiguration thCfg; + thCfg.size = targetSize; + thCfg.pixelFormat = thumbnailer_.pixelFormat(); + int ret = thumbnailEncoder_.configure(thCfg); + + if (!rawThumbnail.empty() && !ret) { + /* + * \todo Avoid value-initialization of all elements of the + * vector. + */ + thumbnail->resize(rawThumbnail.size()); + + /* + * Split planes manually as the encoder expects a vector of + * planes. + * + * \todo Pass a vector of planes directly to + * Thumbnailer::createThumbnailer above and remove the manual + * planes split from here. + */ + std::vector> thumbnailPlanes; + const PixelFormatInfo &formatNV12 = PixelFormatInfo::info(formats::NV12); + size_t yPlaneSize = formatNV12.planeSize(targetSize, 0); + size_t uvPlaneSize = formatNV12.planeSize(targetSize, 1); + thumbnailPlanes.push_back({ rawThumbnail.data(), yPlaneSize }); + thumbnailPlanes.push_back({ rawThumbnail.data() + yPlaneSize, uvPlaneSize }); + + int jpeg_size = thumbnailEncoder_.encode(thumbnailPlanes, + *thumbnail, {}, quality); + thumbnail->resize(jpeg_size); + + LOG(JPEG, Debug) + << "Thumbnail compress returned " + << jpeg_size << " bytes"; + } +} + +void PostProcessorJpeg::process(Camera3RequestDescriptor::StreamBuffer *streamBuffer) +{ + ASSERT(encoder_); + + const FrameBuffer &source = *streamBuffer->srcBuffer; + CameraBuffer *destination = streamBuffer->dstBuffer.get(); + + ASSERT(destination->numPlanes() == 1); + + const CameraMetadata &requestMetadata = streamBuffer->request->settings_; + CameraMetadata *resultMetadata = streamBuffer->request->resultMetadata_.get(); + camera_metadata_ro_entry_t entry; + int ret; + + /* Set EXIF metadata for various tags. */ + Exif exif; + exif.setMake(cameraDevice_->maker()); + exif.setModel(cameraDevice_->model()); + + ret = requestMetadata.getEntry(ANDROID_JPEG_ORIENTATION, &entry); + + const uint32_t jpegOrientation = ret ? 
*entry.data.i32 : 0; + resultMetadata->addEntry(ANDROID_JPEG_ORIENTATION, jpegOrientation); + exif.setOrientation(jpegOrientation); + + exif.setSize(streamSize_); + /* + * We set the frame's EXIF timestamp as the time of encode. + * Since the precision we need for EXIF timestamp is only one + * second, it is good enough. + */ + exif.setTimestamp(std::time(nullptr), 0ms); + + ret = resultMetadata->getEntry(ANDROID_SENSOR_EXPOSURE_TIME, &entry); + exif.setExposureTime(ret ? *entry.data.i64 : 0); + ret = requestMetadata.getEntry(ANDROID_LENS_APERTURE, &entry); + if (ret) + exif.setAperture(*entry.data.f); + + ret = resultMetadata->getEntry(ANDROID_SENSOR_SENSITIVITY, &entry); + exif.setISO(ret ? *entry.data.i32 : 100); + + exif.setFlash(Exif::Flash::FlashNotPresent); + exif.setWhiteBalance(Exif::WhiteBalance::Auto); + + exif.setFocalLength(1.0); + + ret = requestMetadata.getEntry(ANDROID_JPEG_GPS_TIMESTAMP, &entry); + if (ret) { + exif.setGPSDateTimestamp(*entry.data.i64); + resultMetadata->addEntry(ANDROID_JPEG_GPS_TIMESTAMP, + *entry.data.i64); + } + + ret = requestMetadata.getEntry(ANDROID_JPEG_THUMBNAIL_SIZE, &entry); + if (ret) { + const int32_t *data = entry.data.i32; + Size thumbnailSize = { static_cast(data[0]), + static_cast(data[1]) }; + + ret = requestMetadata.getEntry(ANDROID_JPEG_THUMBNAIL_QUALITY, &entry); + uint8_t quality = ret ? *entry.data.u8 : 95; + resultMetadata->addEntry(ANDROID_JPEG_THUMBNAIL_QUALITY, quality); + + if (thumbnailSize != Size(0, 0)) { + std::vector thumbnail; + generateThumbnail(source, thumbnailSize, quality, &thumbnail); + if (!thumbnail.empty()) + exif.setThumbnail(std::move(thumbnail), Exif::Compression::JPEG); + } + + resultMetadata->addEntry(ANDROID_JPEG_THUMBNAIL_SIZE, data, 2); + } + + ret = requestMetadata.getEntry(ANDROID_JPEG_GPS_COORDINATES, &entry); + if (ret) { + exif.setGPSLocation(entry.data.d); + resultMetadata->addEntry(ANDROID_JPEG_GPS_COORDINATES, + entry.data.d, 3); + } + + ret = requestMetadata.getEntry(ANDROID_JPEG_GPS_PROCESSING_METHOD, &entry); + if (ret) { + std::string method(entry.data.u8, entry.data.u8 + entry.count); + exif.setGPSMethod(method); + resultMetadata->addEntry(ANDROID_JPEG_GPS_PROCESSING_METHOD, + entry.data.u8, entry.count); + } + + if (exif.generate() != 0) + LOG(JPEG, Error) << "Failed to generate valid EXIF data"; + + ret = requestMetadata.getEntry(ANDROID_JPEG_QUALITY, &entry); + const uint8_t quality = ret ? *entry.data.u8 : 95; + resultMetadata->addEntry(ANDROID_JPEG_QUALITY, quality); + + int jpeg_size = encoder_->encode(streamBuffer, exif.data(), quality); + if (jpeg_size < 0) { + LOG(JPEG, Error) << "Failed to encode stream image"; + processComplete.emit(streamBuffer, PostProcessor::Status::Error); + return; + } + + /* Fill in the JPEG blob header. */ + uint8_t *resultPtr = destination->plane(0).data() + + destination->jpegBufferSize(cameraDevice_->maxJpegBufferSize()) + - sizeof(struct camera3_jpeg_blob); + auto *blob = reinterpret_cast(resultPtr); + blob->jpeg_blob_id = CAMERA3_JPEG_BLOB_ID; + blob->jpeg_size = jpeg_size; + + /* Update the JPEG result Metadata. 
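+	 * ANDROID_JPEG_SIZE mirrors the jpeg_size value written into the
+	 * camera3_jpeg_blob trailer above.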
*/ + resultMetadata->addEntry(ANDROID_JPEG_SIZE, jpeg_size); + processComplete.emit(streamBuffer, PostProcessor::Status::Success); +} diff --git a/spider-cam/libcamera/src/android/jpeg/post_processor_jpeg.h b/spider-cam/libcamera/src/android/jpeg/post_processor_jpeg.h new file mode 100644 index 0000000..6fe2145 --- /dev/null +++ b/spider-cam/libcamera/src/android/jpeg/post_processor_jpeg.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Google Inc. + * + * JPEG Post Processor + */ + +#pragma once + +#include "../post_processor.h" +#include "encoder_libjpeg.h" +#include "thumbnailer.h" + +#include + +class CameraDevice; + +class PostProcessorJpeg : public PostProcessor +{ +public: + PostProcessorJpeg(CameraDevice *const device); + + int configure(const libcamera::StreamConfiguration &incfg, + const libcamera::StreamConfiguration &outcfg) override; + void process(Camera3RequestDescriptor::StreamBuffer *streamBuffer) override; + +private: + void generateThumbnail(const libcamera::FrameBuffer &source, + const libcamera::Size &targetSize, + unsigned int quality, + std::vector *thumbnail); + + CameraDevice *const cameraDevice_; + std::unique_ptr encoder_; + libcamera::Size streamSize_; + EncoderLibJpeg thumbnailEncoder_; + Thumbnailer thumbnailer_; +}; diff --git a/spider-cam/libcamera/src/android/jpeg/thumbnailer.cpp b/spider-cam/libcamera/src/android/jpeg/thumbnailer.cpp new file mode 100644 index 0000000..adafc46 --- /dev/null +++ b/spider-cam/libcamera/src/android/jpeg/thumbnailer.cpp @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Google Inc. + * + * Simple image thumbnailer + */ + +#include "thumbnailer.h" + +#include + +#include + +#include "libcamera/internal/mapped_framebuffer.h" + +using namespace libcamera; + +LOG_DEFINE_CATEGORY(Thumbnailer) + +Thumbnailer::Thumbnailer() + : valid_(false) +{ +} + +void Thumbnailer::configure(const Size &sourceSize, PixelFormat pixelFormat) +{ + sourceSize_ = sourceSize; + pixelFormat_ = pixelFormat; + + if (pixelFormat_ != formats::NV12) { + LOG(Thumbnailer, Error) + << "Failed to configure: Pixel Format " + << pixelFormat_ << " unsupported."; + return; + } + + valid_ = true; +} + +void Thumbnailer::createThumbnail(const FrameBuffer &source, + const Size &targetSize, + std::vector *destination) +{ + MappedFrameBuffer frame(&source, MappedFrameBuffer::MapFlag::Read); + if (!frame.isValid()) { + LOG(Thumbnailer, Error) + << "Failed to map FrameBuffer : " + << strerror(frame.error()); + return; + } + + if (!valid_) { + LOG(Thumbnailer, Error) << "Config is unconfigured or invalid."; + return; + } + + const unsigned int sw = sourceSize_.width; + const unsigned int sh = sourceSize_.height; + const unsigned int tw = targetSize.width; + const unsigned int th = targetSize.height; + + ASSERT(frame.planes().size() == 2); + ASSERT(tw % 2 == 0 && th % 2 == 0); + + /* Image scaling block implementing nearest-neighbour algorithm. 
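+	 * Each output row y samples input row (sh * y + th / 2) / th, a
+	 * fixed-point mapping rounded to the nearest source row; rows and
+	 * columns advance in pairs so luma stays aligned with the 2x2
+	 * subsampled NV12 chroma.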
*/ + unsigned char *src = frame.planes()[0].data(); + unsigned char *srcC = frame.planes()[1].data(); + unsigned char *srcCb, *srcCr; + unsigned char *dstY, *srcY; + + size_t dstSize = (th * tw) + ((th / 2) * tw); + destination->resize(dstSize); + unsigned char *dst = destination->data(); + unsigned char *dstC = dst + th * tw; + + for (unsigned int y = 0; y < th; y += 2) { + unsigned int sourceY = (sh * y + th / 2) / th; + + dstY = dst + y * tw; + srcY = src + sw * sourceY; + srcCb = srcC + (sourceY / 2) * sw + 0; + srcCr = srcC + (sourceY / 2) * sw + 1; + + for (unsigned int x = 0; x < tw; x += 2) { + unsigned int sourceX = (sw * x + tw / 2) / tw; + + dstY[x] = srcY[sourceX]; + dstY[tw + x] = srcY[sw + sourceX]; + dstY[x + 1] = srcY[sourceX + 1]; + dstY[tw + x + 1] = srcY[sw + sourceX + 1]; + + dstC[(y / 2) * tw + x + 0] = srcCb[(sourceX / 2) * 2]; + dstC[(y / 2) * tw + x + 1] = srcCr[(sourceX / 2) * 2]; + } + } +} diff --git a/spider-cam/libcamera/src/android/jpeg/thumbnailer.h b/spider-cam/libcamera/src/android/jpeg/thumbnailer.h new file mode 100644 index 0000000..1b836e5 --- /dev/null +++ b/spider-cam/libcamera/src/android/jpeg/thumbnailer.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Google Inc. + * + * Simple image thumbnailer + */ + +#pragma once + +#include +#include + +#include "libcamera/internal/formats.h" + +class Thumbnailer +{ +public: + Thumbnailer(); + + void configure(const libcamera::Size &sourceSize, + libcamera::PixelFormat pixelFormat); + void createThumbnail(const libcamera::FrameBuffer &source, + const libcamera::Size &targetSize, + std::vector *dest); + const libcamera::PixelFormat &pixelFormat() const { return pixelFormat_; } + +private: + libcamera::PixelFormat pixelFormat_; + libcamera::Size sourceSize_; + + bool valid_; +}; diff --git a/spider-cam/libcamera/src/android/meson.build b/spider-cam/libcamera/src/android/meson.build new file mode 100644 index 0000000..6864612 --- /dev/null +++ b/spider-cam/libcamera/src/android/meson.build @@ -0,0 +1,75 @@ +# SPDX-License-Identifier: CC0-1.0 + +android_deps = [ + dependency('libexif', required : get_option('android')), + dependency('libjpeg', required : get_option('android')), + libcamera_private, +] + +android_enabled = true + +foreach dep : android_deps + if not dep.found() + android_enabled = false + subdir_done() + endif +endforeach + +libyuv_dep = dependency('libyuv', required : false) + +# Fallback to a subproject if libyuv isn't found, as it's typically not +# provided by distributions. 
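+# The CMake options below force position-independent code and C++17 so the
+# static libyuv can be linked into the libcamera-hal shared library built
+# at the end of this file.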
+if not libyuv_dep.found() + cmake = import('cmake') + + libyuv_vars = cmake.subproject_options() + libyuv_vars.add_cmake_defines({'CMAKE_POSITION_INDEPENDENT_CODE': 'ON'}) + libyuv_vars.set_override_option('cpp_std', 'c++17') + libyuv_vars.append_compile_args('cpp', + '-Wno-sign-compare', + '-Wno-unused-variable', + '-Wno-unused-parameter') + libyuv_vars.append_link_args('-ljpeg') + libyuv = cmake.subproject('libyuv', options : libyuv_vars) + libyuv_dep = libyuv.dependency('yuv') +endif + +android_deps += [libyuv_dep] + +android_hal_sources = files([ + 'camera3_hal.cpp', + 'camera_capabilities.cpp', + 'camera_device.cpp', + 'camera_hal_config.cpp', + 'camera_hal_manager.cpp', + 'camera_metadata.cpp', + 'camera_ops.cpp', + 'camera_request.cpp', + 'camera_stream.cpp', + 'hal_framebuffer.cpp', + 'yuv/post_processor_yuv.cpp' +]) + +android_cpp_args = [] + +subdir('cros') +subdir('jpeg') +subdir('mm') + +android_camera_metadata_sources = files([ + 'metadata/camera_metadata.c', +]) + +android_camera_metadata = static_library('camera_metadata', + android_camera_metadata_sources, + c_args : '-Wno-shadow', + include_directories : android_includes) + +libcamera_hal = shared_library('libcamera-hal', + android_hal_sources, + name_prefix : '', + link_with : android_camera_metadata, + install : true, + cpp_args : android_cpp_args, + include_directories : android_includes, + dependencies : android_deps) diff --git a/spider-cam/libcamera/src/android/metadata/camera_metadata.c b/spider-cam/libcamera/src/android/metadata/camera_metadata.c new file mode 100644 index 0000000..b86586a --- /dev/null +++ b/spider-cam/libcamera/src/android/metadata/camera_metadata.c @@ -0,0 +1,1205 @@ +/* SPDX-License-Identifier: Apache-2.0 */ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "camera_metadata" + +/* + * Replace ALOGE() with a fprintf to stderr so that we don't need to + * re-implement Android's logging system. The log/log.h header file is no + * longer necessary once we removed dependency on ALOGE(). + */ +#define ALOGE(...) fprintf(stderr, LOG_TAG __VA_ARGS__) + +#include +#include + +#include +#include +#include +#include // for offsetof +#include +#include + +#define OK 0 +#define ERROR 1 +#define NOT_FOUND (-ENOENT) +#define SN_EVENT_LOG_ID 0x534e4554 + +#define ALIGN_TO(val, alignment) \ + (((uintptr_t)(val) + ((alignment) - 1)) & ~((alignment) - 1)) + +/** + * A single metadata entry, storing an array of values of a given type. If the + * array is no larger than 4 bytes in size, it is stored in the data.value[] + * array; otherwise, it can found in the parent's data array at index + * data.offset. 
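+ *
+ * For example, a single TYPE_INT32 value (4 bytes) is stored inline in
+ * data.value[], while two of them (8 bytes) go out of line, with
+ * data.offset indexing into the parent's data array.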
+ */ +#define ENTRY_ALIGNMENT ((size_t) 4) +typedef struct camera_metadata_buffer_entry { + uint32_t tag; + uint32_t count; + union { + uint32_t offset; + uint8_t value[4]; + } data; + uint8_t type; + uint8_t reserved[3]; +} camera_metadata_buffer_entry_t; + +typedef uint32_t metadata_uptrdiff_t; +typedef uint32_t metadata_size_t; + +/** + * A packet of metadata. This is a list of entries, each of which may point to + * its values stored at an offset in data. + * + * It is assumed by the utility functions that the memory layout of the packet + * is as follows: + * + * |-----------------------------------------------| + * | camera_metadata_t | + * | | + * |-----------------------------------------------| + * | reserved for future expansion | + * |-----------------------------------------------| + * | camera_metadata_buffer_entry_t #0 | + * |-----------------------------------------------| + * | .... | + * |-----------------------------------------------| + * | camera_metadata_buffer_entry_t #entry_count-1 | + * |-----------------------------------------------| + * | free space for | + * | (entry_capacity-entry_count) entries | + * |-----------------------------------------------| + * | start of camera_metadata.data | + * | | + * |-----------------------------------------------| + * | free space for | + * | (data_capacity-data_count) bytes | + * |-----------------------------------------------| + * + * With the total length of the whole packet being camera_metadata.size bytes. + * + * In short, the entries and data are contiguous in memory after the metadata + * header. + */ +#define METADATA_ALIGNMENT ((size_t) 4) +struct camera_metadata { + metadata_size_t size; + uint32_t version; + uint32_t flags; + metadata_size_t entry_count; + metadata_size_t entry_capacity; + metadata_uptrdiff_t entries_start; // Offset from camera_metadata + metadata_size_t data_count; + metadata_size_t data_capacity; + metadata_uptrdiff_t data_start; // Offset from camera_metadata + uint32_t padding; // padding to 8 bytes boundary + metadata_vendor_id_t vendor_id; +}; + +/** + * A datum of metadata. This corresponds to camera_metadata_entry_t::data + * with the difference that each element is not a pointer. We need to have a + * non-pointer type description in order to figure out the largest alignment + * requirement for data (DATA_ALIGNMENT). 
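+ * The widest members (int64_t, double and camera_metadata_rational_t)
+ * are all 8 bytes, which is where the value below comes from.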
+ */ +#define DATA_ALIGNMENT ((size_t) 8) +typedef union camera_metadata_data { + uint8_t u8; + int32_t i32; + float f; + int64_t i64; + double d; + camera_metadata_rational_t r; +} camera_metadata_data_t; + +_Static_assert(sizeof(metadata_size_t) == 4, + "Size of metadata_size_t must be 4"); +_Static_assert(sizeof(metadata_uptrdiff_t) == 4, + "Size of metadata_uptrdiff_t must be 4"); +_Static_assert(sizeof(metadata_vendor_id_t) == 8, + "Size of metadata_vendor_id_t must be 8"); +_Static_assert(sizeof(camera_metadata_data_t) == 8, + "Size of camera_metadata_data_t must be 8"); + +_Static_assert(offsetof(camera_metadata_buffer_entry_t, tag) == 0, + "Offset of tag must be 0"); +_Static_assert(offsetof(camera_metadata_buffer_entry_t, count) == 4, + "Offset of count must be 4"); +_Static_assert(offsetof(camera_metadata_buffer_entry_t, data) == 8, + "Offset of data must be 8"); +_Static_assert(offsetof(camera_metadata_buffer_entry_t, type) == 12, + "Offset of type must be 12"); +_Static_assert(sizeof(camera_metadata_buffer_entry_t) == 16, + "Size of camera_metadata_buffer_entry_t must be 16"); + +_Static_assert(offsetof(camera_metadata_t, size) == 0, + "Offset of size must be 0"); +_Static_assert(offsetof(camera_metadata_t, version) == 4, + "Offset of version must be 4"); +_Static_assert(offsetof(camera_metadata_t, flags) == 8, + "Offset of flags must be 8"); +_Static_assert(offsetof(camera_metadata_t, entry_count) == 12, + "Offset of entry_count must be 12"); +_Static_assert(offsetof(camera_metadata_t, entry_capacity) == 16, + "Offset of entry_capacity must be 16"); +_Static_assert(offsetof(camera_metadata_t, entries_start) == 20, + "Offset of entries_start must be 20"); +_Static_assert(offsetof(camera_metadata_t, data_count) == 24, + "Offset of data_count must be 24"); +_Static_assert(offsetof(camera_metadata_t, data_capacity) == 28, + "Offset of data_capacity must be 28"); +_Static_assert(offsetof(camera_metadata_t, data_start) == 32, + "Offset of data_start must be 32"); +_Static_assert(offsetof(camera_metadata_t, vendor_id) == 40, + "Offset of vendor_id must be 40"); +_Static_assert(sizeof(camera_metadata_t) == 48, + "Size of camera_metadata_t must be 48"); + +/** + * The preferred alignment of a packet of camera metadata. In general, + * this is the lowest common multiple of the constituents of a metadata + * package, i.e, of DATA_ALIGNMENT and ENTRY_ALIGNMENT. + */ +#define MAX_ALIGNMENT(A, B) (((A) > (B)) ? 
(A) : (B)) +#define METADATA_PACKET_ALIGNMENT \ + MAX_ALIGNMENT(MAX_ALIGNMENT(DATA_ALIGNMENT, METADATA_ALIGNMENT), ENTRY_ALIGNMENT) + +/** Versioning information */ +#define CURRENT_METADATA_VERSION 1 + +/** Flag definitions */ +#define FLAG_SORTED 0x00000001 + +/** Tag information */ + +typedef struct tag_info { + const char *tag_name; + uint8_t tag_type; +} tag_info_t; + +#include "camera_metadata_tag_info.c" + +const size_t camera_metadata_type_size[NUM_TYPES] = { + [TYPE_BYTE] = sizeof(uint8_t), + [TYPE_INT32] = sizeof(int32_t), + [TYPE_FLOAT] = sizeof(float), + [TYPE_INT64] = sizeof(int64_t), + [TYPE_DOUBLE] = sizeof(double), + [TYPE_RATIONAL] = sizeof(camera_metadata_rational_t) +}; + +const char *camera_metadata_type_names[NUM_TYPES] = { + [TYPE_BYTE] = "byte", + [TYPE_INT32] = "int32", + [TYPE_FLOAT] = "float", + [TYPE_INT64] = "int64", + [TYPE_DOUBLE] = "double", + [TYPE_RATIONAL] = "rational" +}; + +static camera_metadata_buffer_entry_t *get_entries( + const camera_metadata_t *metadata) { + return (camera_metadata_buffer_entry_t*) + ((uint8_t*)metadata + metadata->entries_start); +} + +static uint8_t *get_data(const camera_metadata_t *metadata) { + return (uint8_t*)metadata + metadata->data_start; +} + +size_t get_camera_metadata_alignment() { + return METADATA_PACKET_ALIGNMENT; +} + +camera_metadata_t *allocate_copy_camera_metadata_checked( + const camera_metadata_t *src, + size_t src_size) { + + if (src == NULL) { + return NULL; + } + + if (src_size < sizeof(camera_metadata_t)) { + ALOGE("%s: Source size too small!", __FUNCTION__); + // android_errorWriteLog(0x534e4554, "67782345"); + return NULL; + } + + void *buffer = malloc(src_size); + memcpy(buffer, src, src_size); + + camera_metadata_t *metadata = (camera_metadata_t*) buffer; + if (validate_camera_metadata_structure(metadata, &src_size) != OK) { + free(buffer); + return NULL; + } + + return metadata; +} + +camera_metadata_t *allocate_camera_metadata(size_t entry_capacity, + size_t data_capacity) { + + size_t memory_needed = calculate_camera_metadata_size(entry_capacity, + data_capacity); + void *buffer = malloc(memory_needed); + camera_metadata_t *metadata = place_camera_metadata( + buffer, memory_needed, entry_capacity, data_capacity); + if (!metadata) { + /* This should not happen when memory_needed is the same + * calculated in this function and in place_camera_metadata. 
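+         * place_camera_metadata() can only fail here if malloc() returned
+         * NULL, since dst_size is exactly the memory_needed it checks
+         * against.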
+ */ + free(buffer); + } + return metadata; +} + +camera_metadata_t *place_camera_metadata(void *dst, + size_t dst_size, + size_t entry_capacity, + size_t data_capacity) { + if (dst == NULL) return NULL; + + size_t memory_needed = calculate_camera_metadata_size(entry_capacity, + data_capacity); + if (memory_needed > dst_size) return NULL; + + camera_metadata_t *metadata = (camera_metadata_t*)dst; + metadata->version = CURRENT_METADATA_VERSION; + metadata->flags = 0; + metadata->entry_count = 0; + metadata->entry_capacity = entry_capacity; + metadata->entries_start = + ALIGN_TO(sizeof(camera_metadata_t), ENTRY_ALIGNMENT); + metadata->data_count = 0; + metadata->data_capacity = data_capacity; + metadata->size = memory_needed; + size_t data_unaligned = (uint8_t*)(get_entries(metadata) + + metadata->entry_capacity) - (uint8_t*)metadata; + metadata->data_start = ALIGN_TO(data_unaligned, DATA_ALIGNMENT); + metadata->vendor_id = CAMERA_METADATA_INVALID_VENDOR_ID; + + assert(validate_camera_metadata_structure(metadata, NULL) == OK); + return metadata; +} +void free_camera_metadata(camera_metadata_t *metadata) { + free(metadata); +} + +size_t calculate_camera_metadata_size(size_t entry_count, + size_t data_count) { + size_t memory_needed = sizeof(camera_metadata_t); + // Start entry list at aligned boundary + memory_needed = ALIGN_TO(memory_needed, ENTRY_ALIGNMENT); + memory_needed += sizeof(camera_metadata_buffer_entry_t[entry_count]); + // Start buffer list at aligned boundary + memory_needed = ALIGN_TO(memory_needed, DATA_ALIGNMENT); + memory_needed += sizeof(uint8_t[data_count]); + // Make sure camera metadata can be stacked in continuous memory + memory_needed = ALIGN_TO(memory_needed, METADATA_PACKET_ALIGNMENT); + return memory_needed; +} + +size_t get_camera_metadata_size(const camera_metadata_t *metadata) { + if (metadata == NULL) return ERROR; + + return metadata->size; +} + +size_t get_camera_metadata_compact_size(const camera_metadata_t *metadata) { + if (metadata == NULL) return ERROR; + + return calculate_camera_metadata_size(metadata->entry_count, + metadata->data_count); +} + +size_t get_camera_metadata_entry_count(const camera_metadata_t *metadata) { + return metadata->entry_count; +} + +size_t get_camera_metadata_entry_capacity(const camera_metadata_t *metadata) { + return metadata->entry_capacity; +} + +size_t get_camera_metadata_data_count(const camera_metadata_t *metadata) { + return metadata->data_count; +} + +size_t get_camera_metadata_data_capacity(const camera_metadata_t *metadata) { + return metadata->data_capacity; +} + +camera_metadata_t* copy_camera_metadata(void *dst, size_t dst_size, + const camera_metadata_t *src) { + size_t memory_needed = get_camera_metadata_compact_size(src); + + if (dst == NULL) return NULL; + if (dst_size < memory_needed) return NULL; + + camera_metadata_t *metadata = + place_camera_metadata(dst, dst_size, src->entry_count, src->data_count); + + metadata->flags = src->flags; + metadata->entry_count = src->entry_count; + metadata->data_count = src->data_count; + metadata->vendor_id = src->vendor_id; + + memcpy(get_entries(metadata), get_entries(src), + sizeof(camera_metadata_buffer_entry_t[metadata->entry_count])); + memcpy(get_data(metadata), get_data(src), + sizeof(uint8_t[metadata->data_count])); + + assert(validate_camera_metadata_structure(metadata, NULL) == OK); + return metadata; +} + +// This method should be used when the camera metadata cannot be trusted. For example, when it's +// read from Parcel. 
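+// The guard below rejects data_count values large enough to make
+// data_count * type_size wrap around SIZE_MAX, before the byte size is
+// computed.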
+static int validate_and_calculate_camera_metadata_entry_data_size(size_t *data_size, uint8_t type, + size_t data_count) { + if (type >= NUM_TYPES) return ERROR; + + // Check for overflow + if (data_count != 0 && + camera_metadata_type_size[type] > (SIZE_MAX - DATA_ALIGNMENT + 1) / data_count) { + // android_errorWriteLog(SN_EVENT_LOG_ID, "30741779"); + return ERROR; + } + + size_t data_bytes = data_count * camera_metadata_type_size[type]; + + if (data_size) { + *data_size = data_bytes <= 4 ? 0 : ALIGN_TO(data_bytes, DATA_ALIGNMENT); + } + + return OK; +} + +size_t calculate_camera_metadata_entry_data_size(uint8_t type, + size_t data_count) { + if (type >= NUM_TYPES) return 0; + + size_t data_bytes = data_count * + camera_metadata_type_size[type]; + + return data_bytes <= 4 ? 0 : ALIGN_TO(data_bytes, DATA_ALIGNMENT); +} + +int validate_camera_metadata_structure(const camera_metadata_t *metadata, + const size_t *expected_size) { + + if (metadata == NULL) { + ALOGE("%s: metadata is null!", __FUNCTION__); + return CAMERA_METADATA_VALIDATION_ERROR; + } + + uintptr_t aligned_ptr = ALIGN_TO(metadata, METADATA_PACKET_ALIGNMENT); + const uintptr_t alignmentOffset = aligned_ptr - (uintptr_t) metadata; + + // Check that the metadata pointer is well-aligned first. + { + static const struct { + const char *name; + size_t alignment; + } alignments[] = { + { + .name = "camera_metadata", + .alignment = METADATA_ALIGNMENT + }, + { + .name = "camera_metadata_buffer_entry", + .alignment = ENTRY_ALIGNMENT + }, + { + .name = "camera_metadata_data", + .alignment = DATA_ALIGNMENT + }, + }; + + for (size_t i = 0; i < sizeof(alignments)/sizeof(alignments[0]); ++i) { + uintptr_t aligned_ptr = ALIGN_TO((uintptr_t) metadata + alignmentOffset, + alignments[i].alignment); + + if ((uintptr_t)metadata + alignmentOffset != aligned_ptr) { + ALOGE("%s: Metadata pointer is not aligned (actual %p, " + "expected %p, offset %" PRIuPTR ") to type %s", + __FUNCTION__, metadata, + (void*)aligned_ptr, alignmentOffset, alignments[i].name); + return CAMERA_METADATA_VALIDATION_ERROR; + } + } + } + + /** + * Check that the metadata contents are correct + */ + + if (expected_size != NULL && metadata->size > *expected_size) { + ALOGE("%s: Metadata size (%" PRIu32 ") should be <= expected size (%zu)", + __FUNCTION__, metadata->size, *expected_size); + return CAMERA_METADATA_VALIDATION_ERROR; + } + + if (metadata->entry_count > metadata->entry_capacity) { + ALOGE("%s: Entry count (%" PRIu32 ") should be <= entry capacity " + "(%" PRIu32 ")", + __FUNCTION__, metadata->entry_count, metadata->entry_capacity); + return CAMERA_METADATA_VALIDATION_ERROR; + } + + if (metadata->data_count > metadata->data_capacity) { + ALOGE("%s: Data count (%" PRIu32 ") should be <= data capacity " + "(%" PRIu32 ")", + __FUNCTION__, metadata->data_count, metadata->data_capacity); + // android_errorWriteLog(SN_EVENT_LOG_ID, "30591838"); + return CAMERA_METADATA_VALIDATION_ERROR; + } + + const metadata_uptrdiff_t entries_end = + metadata->entries_start + metadata->entry_capacity; + if (entries_end < metadata->entries_start || // overflow check + entries_end > metadata->data_start) { + + ALOGE("%s: Entry start + capacity (%" PRIu32 ") should be <= data start " + "(%" PRIu32 ")", + __FUNCTION__, + (metadata->entries_start + metadata->entry_capacity), + metadata->data_start); + return CAMERA_METADATA_VALIDATION_ERROR; + } + + const metadata_uptrdiff_t data_end = + metadata->data_start + metadata->data_capacity; + if (data_end < metadata->data_start || // overflow 
check + data_end > metadata->size) { + + ALOGE("%s: Data start + capacity (%" PRIu32 ") should be <= total size " + "(%" PRIu32 ")", + __FUNCTION__, + (metadata->data_start + metadata->data_capacity), + metadata->size); + return CAMERA_METADATA_VALIDATION_ERROR; + } + + // Validate each entry + const metadata_size_t entry_count = metadata->entry_count; + camera_metadata_buffer_entry_t *entries = get_entries(metadata); + + for (size_t i = 0; i < entry_count; ++i) { + + if ((uintptr_t)&entries[i] + alignmentOffset != + ALIGN_TO((uintptr_t)&entries[i] + alignmentOffset, ENTRY_ALIGNMENT)) { + ALOGE("%s: Entry index %zu had bad alignment (address %p)," + " expected alignment %zu", + __FUNCTION__, i, &entries[i], ENTRY_ALIGNMENT); + return CAMERA_METADATA_VALIDATION_ERROR; + } + + camera_metadata_buffer_entry_t entry = entries[i]; + + if (entry.type >= NUM_TYPES) { + ALOGE("%s: Entry index %zu had a bad type %d", + __FUNCTION__, i, entry.type); + return CAMERA_METADATA_VALIDATION_ERROR; + } + + // TODO: fix vendor_tag_ops across processes so we don't need to special + // case vendor-specific tags + uint32_t tag_section = entry.tag >> 16; + int tag_type = get_local_camera_metadata_tag_type(entry.tag, metadata); + if (tag_type != (int)entry.type && tag_section < VENDOR_SECTION) { + ALOGE("%s: Entry index %zu had tag type %d, but the type was %d", + __FUNCTION__, i, tag_type, entry.type); + return CAMERA_METADATA_VALIDATION_ERROR; + } + + size_t data_size; + if (validate_and_calculate_camera_metadata_entry_data_size(&data_size, entry.type, + entry.count) != OK) { + ALOGE("%s: Entry data size is invalid. type: %u count: %u", __FUNCTION__, entry.type, + entry.count); + return CAMERA_METADATA_VALIDATION_ERROR; + } + + if (data_size != 0) { + camera_metadata_data_t *data = + (camera_metadata_data_t*) (get_data(metadata) + + entry.data.offset); + + if ((uintptr_t)data + alignmentOffset != + ALIGN_TO((uintptr_t)data + alignmentOffset, DATA_ALIGNMENT)) { + ALOGE("%s: Entry index %zu had bad data alignment (address %p)," + " expected align %zu, (tag name %s, data size %zu)", + __FUNCTION__, i, data, DATA_ALIGNMENT, + get_local_camera_metadata_tag_name(entry.tag, metadata) ? + : "unknown", data_size); + return CAMERA_METADATA_VALIDATION_ERROR; + } + + size_t data_entry_end = entry.data.offset + data_size; + if (data_entry_end < entry.data.offset || // overflow check + data_entry_end > metadata->data_capacity) { + + ALOGE("%s: Entry index %zu data ends (%zu) beyond the capacity " + "%" PRIu32, __FUNCTION__, i, data_entry_end, + metadata->data_capacity); + return CAMERA_METADATA_VALIDATION_ERROR; + } + + } else if (entry.count == 0) { + if (entry.data.offset != 0) { + ALOGE("%s: Entry index %zu had 0 items, but offset was non-0 " + "(%" PRIu32 "), tag name: %s", __FUNCTION__, i, entry.data.offset, + get_local_camera_metadata_tag_name(entry.tag, metadata) ? : "unknown"); + return CAMERA_METADATA_VALIDATION_ERROR; + } + } // else data stored inline, so we look at value which can be anything. 
+ } + + if (alignmentOffset == 0) { + return OK; + } + return CAMERA_METADATA_VALIDATION_SHIFTED; +} + +int append_camera_metadata(camera_metadata_t *dst, + const camera_metadata_t *src) { + if (dst == NULL || src == NULL ) return ERROR; + + // Check for overflow + if (src->entry_count + dst->entry_count < src->entry_count) return ERROR; + if (src->data_count + dst->data_count < src->data_count) return ERROR; + // Check for space + if (dst->entry_capacity < src->entry_count + dst->entry_count) return ERROR; + if (dst->data_capacity < src->data_count + dst->data_count) return ERROR; + + if ((dst->vendor_id != CAMERA_METADATA_INVALID_VENDOR_ID) && + (src->vendor_id != CAMERA_METADATA_INVALID_VENDOR_ID)) { + if (dst->vendor_id != src->vendor_id) { + ALOGE("%s: Append for metadata from different vendors is" + "not supported!", __func__); + return ERROR; + } + } + + memcpy(get_entries(dst) + dst->entry_count, get_entries(src), + sizeof(camera_metadata_buffer_entry_t[src->entry_count])); + memcpy(get_data(dst) + dst->data_count, get_data(src), + sizeof(uint8_t[src->data_count])); + if (dst->data_count != 0) { + camera_metadata_buffer_entry_t *entry = get_entries(dst) + dst->entry_count; + for (size_t i = 0; i < src->entry_count; i++, entry++) { + if ( calculate_camera_metadata_entry_data_size(entry->type, + entry->count) > 0 ) { + entry->data.offset += dst->data_count; + } + } + } + if (dst->entry_count == 0) { + // Appending onto empty buffer, keep sorted state + dst->flags |= src->flags & FLAG_SORTED; + } else if (src->entry_count != 0) { + // Both src, dst are nonempty, cannot assume sort remains + dst->flags &= ~FLAG_SORTED; + } else { + // Src is empty, keep dst sorted state + } + dst->entry_count += src->entry_count; + dst->data_count += src->data_count; + + if (dst->vendor_id == CAMERA_METADATA_INVALID_VENDOR_ID) { + dst->vendor_id = src->vendor_id; + } + + assert(validate_camera_metadata_structure(dst, NULL) == OK); + return OK; +} + +camera_metadata_t *clone_camera_metadata(const camera_metadata_t *src) { + int res; + if (src == NULL) return NULL; + camera_metadata_t *clone = allocate_camera_metadata( + get_camera_metadata_entry_count(src), + get_camera_metadata_data_count(src)); + if (clone != NULL) { + res = append_camera_metadata(clone, src); + if (res != OK) { + free_camera_metadata(clone); + clone = NULL; + } + } + assert(validate_camera_metadata_structure(clone, NULL) == OK); + return clone; +} + +static int add_camera_metadata_entry_raw(camera_metadata_t *dst, + uint32_t tag, + uint8_t type, + const void *data, + size_t data_count) { + + if (dst == NULL) return ERROR; + if (dst->entry_count == dst->entry_capacity) return ERROR; + if (data_count && data == NULL) return ERROR; + + size_t data_bytes = + calculate_camera_metadata_entry_data_size(type, data_count); + if (data_bytes + dst->data_count > dst->data_capacity) return ERROR; + + size_t data_payload_bytes = + data_count * camera_metadata_type_size[type]; + camera_metadata_buffer_entry_t *entry = get_entries(dst) + dst->entry_count; + memset(entry, 0, sizeof(camera_metadata_buffer_entry_t)); + entry->tag = tag; + entry->type = type; + entry->count = data_count; + + if (data_bytes == 0) { + memcpy(entry->data.value, data, + data_payload_bytes); + } else { + entry->data.offset = dst->data_count; + memcpy(get_data(dst) + entry->data.offset, data, + data_payload_bytes); + dst->data_count += data_bytes; + } + dst->entry_count++; + dst->flags &= ~FLAG_SORTED; + assert(validate_camera_metadata_structure(dst, NULL) == OK); + return 
OK; +} + +int add_camera_metadata_entry(camera_metadata_t *dst, + uint32_t tag, + const void *data, + size_t data_count) { + + int type = get_local_camera_metadata_tag_type(tag, dst); + if (type == -1) { + ALOGE("%s: Unknown tag %04x.", __FUNCTION__, tag); + return ERROR; + } + + return add_camera_metadata_entry_raw(dst, + tag, + type, + data, + data_count); +} + +static int compare_entry_tags(const void *p1, const void *p2) { + uint32_t tag1 = ((camera_metadata_buffer_entry_t*)p1)->tag; + uint32_t tag2 = ((camera_metadata_buffer_entry_t*)p2)->tag; + return tag1 < tag2 ? -1 : + tag1 == tag2 ? 0 : + 1; +} + +int sort_camera_metadata(camera_metadata_t *dst) { + if (dst == NULL) return ERROR; + if (dst->flags & FLAG_SORTED) return OK; + + qsort(get_entries(dst), dst->entry_count, + sizeof(camera_metadata_buffer_entry_t), + compare_entry_tags); + dst->flags |= FLAG_SORTED; + + assert(validate_camera_metadata_structure(dst, NULL) == OK); + return OK; +} + +int get_camera_metadata_entry(camera_metadata_t *src, + size_t index, + camera_metadata_entry_t *entry) { + if (src == NULL || entry == NULL) return ERROR; + if (index >= src->entry_count) return ERROR; + + camera_metadata_buffer_entry_t *buffer_entry = get_entries(src) + index; + + entry->index = index; + entry->tag = buffer_entry->tag; + entry->type = buffer_entry->type; + entry->count = buffer_entry->count; + if (buffer_entry->count * + camera_metadata_type_size[buffer_entry->type] > 4) { + entry->data.u8 = get_data(src) + buffer_entry->data.offset; + } else { + entry->data.u8 = buffer_entry->data.value; + } + return OK; +} + +int get_camera_metadata_ro_entry(const camera_metadata_t *src, + size_t index, + camera_metadata_ro_entry_t *entry) { + return get_camera_metadata_entry((camera_metadata_t*)src, index, + (camera_metadata_entry_t*)entry); +} + +int find_camera_metadata_entry(camera_metadata_t *src, + uint32_t tag, + camera_metadata_entry_t *entry) { + if (src == NULL) return ERROR; + + uint32_t index; + if (src->flags & FLAG_SORTED) { + // Sorted entries, do a binary search + camera_metadata_buffer_entry_t *search_entry = NULL; + camera_metadata_buffer_entry_t key; + key.tag = tag; + search_entry = bsearch(&key, + get_entries(src), + src->entry_count, + sizeof(camera_metadata_buffer_entry_t), + compare_entry_tags); + if (search_entry == NULL) return NOT_FOUND; + index = search_entry - get_entries(src); + } else { + // Not sorted, linear search + camera_metadata_buffer_entry_t *search_entry = get_entries(src); + for (index = 0; index < src->entry_count; index++, search_entry++) { + if (search_entry->tag == tag) { + break; + } + } + if (index == src->entry_count) return NOT_FOUND; + } + + return get_camera_metadata_entry(src, index, + entry); +} + +int find_camera_metadata_ro_entry(const camera_metadata_t *src, + uint32_t tag, + camera_metadata_ro_entry_t *entry) { + return find_camera_metadata_entry((camera_metadata_t*)src, tag, + (camera_metadata_entry_t*)entry); +} + + +int delete_camera_metadata_entry(camera_metadata_t *dst, + size_t index) { + if (dst == NULL) return ERROR; + if (index >= dst->entry_count) return ERROR; + + camera_metadata_buffer_entry_t *entry = get_entries(dst) + index; + size_t data_bytes = calculate_camera_metadata_entry_data_size(entry->type, + entry->count); + + if (data_bytes > 0) { + // Shift data buffer to overwrite deleted data + uint8_t *start = get_data(dst) + entry->data.offset; + uint8_t *end = start + data_bytes; + size_t length = dst->data_count - entry->data.offset - data_bytes; + memmove(start, 
end, length); + + // Update all entry indices to account for shift + camera_metadata_buffer_entry_t *e = get_entries(dst); + size_t i; + for (i = 0; i < dst->entry_count; i++) { + if (calculate_camera_metadata_entry_data_size( + e->type, e->count) > 0 && + e->data.offset > entry->data.offset) { + e->data.offset -= data_bytes; + } + ++e; + } + dst->data_count -= data_bytes; + } + // Shift entry array + memmove(entry, entry + 1, + sizeof(camera_metadata_buffer_entry_t) * + (dst->entry_count - index - 1) ); + dst->entry_count -= 1; + + assert(validate_camera_metadata_structure(dst, NULL) == OK); + return OK; +} + +int update_camera_metadata_entry(camera_metadata_t *dst, + size_t index, + const void *data, + size_t data_count, + camera_metadata_entry_t *updated_entry) { + if (dst == NULL) return ERROR; + if (index >= dst->entry_count) return ERROR; + + camera_metadata_buffer_entry_t *entry = get_entries(dst) + index; + + size_t data_bytes = + calculate_camera_metadata_entry_data_size(entry->type, + data_count); + size_t data_payload_bytes = + data_count * camera_metadata_type_size[entry->type]; + + size_t entry_bytes = + calculate_camera_metadata_entry_data_size(entry->type, + entry->count); + if (data_bytes != entry_bytes) { + // May need to shift/add to data array + if (dst->data_capacity < dst->data_count + data_bytes - entry_bytes) { + // No room + return ERROR; + } + if (entry_bytes != 0) { + // Remove old data + uint8_t *start = get_data(dst) + entry->data.offset; + uint8_t *end = start + entry_bytes; + size_t length = dst->data_count - entry->data.offset - entry_bytes; + memmove(start, end, length); + dst->data_count -= entry_bytes; + + // Update all entry indices to account for shift + camera_metadata_buffer_entry_t *e = get_entries(dst); + size_t i; + for (i = 0; i < dst->entry_count; i++) { + if (calculate_camera_metadata_entry_data_size( + e->type, e->count) > 0 && + e->data.offset > entry->data.offset) { + e->data.offset -= entry_bytes; + } + ++e; + } + } + + if (data_bytes != 0) { + // Append new data + entry->data.offset = dst->data_count; + + memcpy(get_data(dst) + entry->data.offset, data, data_payload_bytes); + dst->data_count += data_bytes; + } + } else if (data_bytes != 0) { + // data size unchanged, reuse same data location + memcpy(get_data(dst) + entry->data.offset, data, data_payload_bytes); + } + + if (data_bytes == 0) { + // Data fits into entry + memcpy(entry->data.value, data, + data_payload_bytes); + } + + entry->count = data_count; + + if (updated_entry != NULL) { + get_camera_metadata_entry(dst, + index, + updated_entry); + } + + assert(validate_camera_metadata_structure(dst, NULL) == OK); + return OK; +} + +static const vendor_tag_ops_t *vendor_tag_ops = NULL; +static const struct vendor_tag_cache_ops *vendor_cache_ops = NULL; + +// Declared in system/media/private/camera/include/camera_metadata_hidden.h +const char *get_local_camera_metadata_section_name_vendor_id(uint32_t tag, + metadata_vendor_id_t id) { + uint32_t tag_section = tag >> 16; + if (tag_section >= VENDOR_SECTION && vendor_cache_ops != NULL && + id != CAMERA_METADATA_INVALID_VENDOR_ID) { + return vendor_cache_ops->get_section_name(tag, id); + } else if (tag_section >= VENDOR_SECTION && vendor_tag_ops != NULL) { + return vendor_tag_ops->get_section_name( + vendor_tag_ops, + tag); + } + if (tag_section >= ANDROID_SECTION_COUNT) { + return NULL; + } + return camera_metadata_section_names[tag_section]; +} + +// Declared in system/media/private/camera/include/camera_metadata_hidden.h +const char 
*get_local_camera_metadata_tag_name_vendor_id(uint32_t tag, + metadata_vendor_id_t id) { + uint32_t tag_section = tag >> 16; + if (tag_section >= VENDOR_SECTION && vendor_cache_ops != NULL && + id != CAMERA_METADATA_INVALID_VENDOR_ID) { + return vendor_cache_ops->get_tag_name(tag, id); + } else if (tag_section >= VENDOR_SECTION && vendor_tag_ops != NULL) { + return vendor_tag_ops->get_tag_name( + vendor_tag_ops, + tag); + } + if (tag_section >= ANDROID_SECTION_COUNT || + tag >= camera_metadata_section_bounds[tag_section][1] ) { + return NULL; + } + uint32_t tag_index = tag & 0xFFFF; + return tag_info[tag_section][tag_index].tag_name; +} + +// Declared in system/media/private/camera/include/camera_metadata_hidden.h +int get_local_camera_metadata_tag_type_vendor_id(uint32_t tag, + metadata_vendor_id_t id) { + uint32_t tag_section = tag >> 16; + if (tag_section >= VENDOR_SECTION && vendor_cache_ops != NULL && + id != CAMERA_METADATA_INVALID_VENDOR_ID) { + return vendor_cache_ops->get_tag_type(tag, id); + } else if (tag_section >= VENDOR_SECTION && vendor_tag_ops != NULL) { + return vendor_tag_ops->get_tag_type( + vendor_tag_ops, + tag); + } + if (tag_section >= ANDROID_SECTION_COUNT || + tag >= camera_metadata_section_bounds[tag_section][1] ) { + return -1; + } + uint32_t tag_index = tag & 0xFFFF; + return tag_info[tag_section][tag_index].tag_type; +} + +const char *get_camera_metadata_section_name(uint32_t tag) { + return get_local_camera_metadata_section_name(tag, NULL); +} + +const char *get_camera_metadata_tag_name(uint32_t tag) { + return get_local_camera_metadata_tag_name(tag, NULL); +} + +int get_camera_metadata_tag_type(uint32_t tag) { + return get_local_camera_metadata_tag_type(tag, NULL); +} + +const char *get_local_camera_metadata_section_name(uint32_t tag, + const camera_metadata_t *meta) { + metadata_vendor_id_t id = (NULL == meta) ? CAMERA_METADATA_INVALID_VENDOR_ID : + meta->vendor_id; + + return get_local_camera_metadata_section_name_vendor_id(tag, id); +} + +const char *get_local_camera_metadata_tag_name(uint32_t tag, + const camera_metadata_t *meta) { + metadata_vendor_id_t id = (NULL == meta) ? CAMERA_METADATA_INVALID_VENDOR_ID : + meta->vendor_id; + + return get_local_camera_metadata_tag_name_vendor_id(tag, id); +} + +int get_local_camera_metadata_tag_type(uint32_t tag, + const camera_metadata_t *meta) { + metadata_vendor_id_t id = (NULL == meta) ? 
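+            // no buffer means there is no per-buffer vendor id to consult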
CAMERA_METADATA_INVALID_VENDOR_ID : + meta->vendor_id; + + return get_local_camera_metadata_tag_type_vendor_id(tag, id); +} + +int set_camera_metadata_vendor_tag_ops(const vendor_tag_query_ops_t* ops) { + // **DEPRECATED** + (void) ops; + ALOGE("%s: This function has been deprecated", __FUNCTION__); + return ERROR; +} + +// Declared in system/media/private/camera/include/camera_metadata_hidden.h +int set_camera_metadata_vendor_ops(const vendor_tag_ops_t* ops) { + vendor_tag_ops = ops; + return OK; +} + +// Declared in system/media/private/camera/include/camera_metadata_hidden.h +int set_camera_metadata_vendor_cache_ops( + const struct vendor_tag_cache_ops *query_cache_ops) { + vendor_cache_ops = query_cache_ops; + return OK; +} + +// Declared in system/media/private/camera/include/camera_metadata_hidden.h +void set_camera_metadata_vendor_id(camera_metadata_t *meta, + metadata_vendor_id_t id) { + if (NULL != meta) { + meta->vendor_id = id; + } +} + +// Declared in system/media/private/camera/include/camera_metadata_hidden.h +metadata_vendor_id_t get_camera_metadata_vendor_id( + const camera_metadata_t *meta) { + metadata_vendor_id_t ret = CAMERA_METADATA_INVALID_VENDOR_ID; + + if (NULL != meta) { + ret = meta->vendor_id; + } + + return ret; +} + +static void print_data(int fd, const uint8_t *data_ptr, uint32_t tag, int type, + int count, + int indentation); + +void dump_camera_metadata(const camera_metadata_t *metadata, + int fd, + int verbosity) { + dump_indented_camera_metadata(metadata, fd, verbosity, 0); +} + +void dump_indented_camera_metadata(const camera_metadata_t *metadata, + int fd, + int verbosity, + int indentation) { + if (metadata == NULL) { + dprintf(fd, "%*sDumping camera metadata array: Not allocated\n", + indentation, ""); + return; + } + unsigned int i; + dprintf(fd, + "%*sDumping camera metadata array: %" PRIu32 " / %" PRIu32 " entries, " + "%" PRIu32 " / %" PRIu32 " bytes of extra data.\n", indentation, "", + metadata->entry_count, metadata->entry_capacity, + metadata->data_count, metadata->data_capacity); + dprintf(fd, "%*sVersion: %d, Flags: %08x\n", + indentation + 2, "", + metadata->version, metadata->flags); + camera_metadata_buffer_entry_t *entry = get_entries(metadata); + for (i=0; i < metadata->entry_count; i++, entry++) { + + const char *tag_name, *tag_section; + tag_section = get_local_camera_metadata_section_name(entry->tag, metadata); + if (tag_section == NULL) { + tag_section = "unknownSection"; + } + tag_name = get_local_camera_metadata_tag_name(entry->tag, metadata); + if (tag_name == NULL) { + tag_name = "unknownTag"; + } + const char *type_name; + if (entry->type >= NUM_TYPES) { + type_name = "unknown"; + } else { + type_name = camera_metadata_type_names[entry->type]; + } + dprintf(fd, "%*s%s.%s (%05x): %s[%" PRIu32 "]\n", + indentation + 2, "", + tag_section, + tag_name, + entry->tag, + type_name, + entry->count); + + if (verbosity < 1) continue; + + if (entry->type >= NUM_TYPES) continue; + + size_t type_size = camera_metadata_type_size[entry->type]; + uint8_t *data_ptr; + if ( type_size * entry->count > 4 ) { + if (entry->data.offset >= metadata->data_count) { + ALOGE("%s: Malformed entry data offset: %" PRIu32 " (max %" PRIu32 ")", + __FUNCTION__, + entry->data.offset, + metadata->data_count); + continue; + } + data_ptr = get_data(metadata) + entry->data.offset; + } else { + data_ptr = entry->data.value; + } + int count = entry->count; + if (verbosity < 2 && count > 16) count = 16; + + print_data(fd, data_ptr, entry->tag, entry->type, count, 
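+                   // count may have been clamped to 16 above when verbosity < 2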
indentation); + } +} + +static void print_data(int fd, const uint8_t *data_ptr, uint32_t tag, + int type, int count, int indentation) { + static int values_per_line[NUM_TYPES] = { + [TYPE_BYTE] = 16, + [TYPE_INT32] = 4, + [TYPE_FLOAT] = 8, + [TYPE_INT64] = 2, + [TYPE_DOUBLE] = 4, + [TYPE_RATIONAL] = 2, + }; + size_t type_size = camera_metadata_type_size[type]; + char value_string_tmp[CAMERA_METADATA_ENUM_STRING_MAX_SIZE]; + uint32_t value; + + int lines = count / values_per_line[type]; + if (count % values_per_line[type] != 0) lines++; + + int index = 0; + int j, k; + for (j = 0; j < lines; j++) { + dprintf(fd, "%*s[", indentation + 4, ""); + for (k = 0; + k < values_per_line[type] && count > 0; + k++, count--, index += type_size) { + + switch (type) { + case TYPE_BYTE: + value = *(data_ptr + index); + if (camera_metadata_enum_snprint(tag, + value, + value_string_tmp, + sizeof(value_string_tmp)) + == OK) { + dprintf(fd, "%s ", value_string_tmp); + } else { + dprintf(fd, "%hhu ", + *(data_ptr + index)); + } + break; + case TYPE_INT32: + value = + *(int32_t*)(data_ptr + index); + if (camera_metadata_enum_snprint(tag, + value, + value_string_tmp, + sizeof(value_string_tmp)) + == OK) { + dprintf(fd, "%s ", value_string_tmp); + } else { + dprintf(fd, "%" PRId32 " ", + *(int32_t*)(data_ptr + index)); + } + break; + case TYPE_FLOAT: + dprintf(fd, "%0.8f ", + *(float*)(data_ptr + index)); + break; + case TYPE_INT64: + dprintf(fd, "%" PRId64 " ", + *(int64_t*)(data_ptr + index)); + break; + case TYPE_DOUBLE: + dprintf(fd, "%0.8f ", + *(double*)(data_ptr + index)); + break; + case TYPE_RATIONAL: { + int32_t numerator = *(int32_t*)(data_ptr + index); + int32_t denominator = *(int32_t*)(data_ptr + index + 4); + dprintf(fd, "(%d / %d) ", + numerator, denominator); + break; + } + default: + dprintf(fd, "??? "); + } + } + dprintf(fd, "]\n"); + } +} diff --git a/spider-cam/libcamera/src/android/metadata/camera_metadata_tag_info.c b/spider-cam/libcamera/src/android/metadata/camera_metadata_tag_info.c new file mode 100644 index 0000000..e1b81f6 --- /dev/null +++ b/spider-cam/libcamera/src/android/metadata/camera_metadata_tag_info.c @@ -0,0 +1,2812 @@ +/* SPDX-License-Identifier: Apache-2.0 */ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * !! Do not reference this file directly !! + * + * It is logically a part of camera_metadata.c. It is broken out for ease of + * maintaining the tag info. + * + * Array assignments are done using specified-index syntax to keep things in + * sync with camera_metadata_tags.h + */ + +/** + * ! Do not edit this file directly ! 
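+ *
+ * Layout note: each per-section table below is indexed by the tag's low
+ * 16 bits (tag - <SECTION>_START) and stores the tag's short name plus its
+ * payload type, so name/type lookups are O(1) once the section is known.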
+ * + * Generated automatically from camera_metadata_tag_info.mako + */ + +const char *camera_metadata_section_names[ANDROID_SECTION_COUNT] = { + [ANDROID_COLOR_CORRECTION] = "android.colorCorrection", + [ANDROID_CONTROL] = "android.control", + [ANDROID_DEMOSAIC] = "android.demosaic", + [ANDROID_EDGE] = "android.edge", + [ANDROID_FLASH] = "android.flash", + [ANDROID_FLASH_INFO] = "android.flash.info", + [ANDROID_HOT_PIXEL] = "android.hotPixel", + [ANDROID_JPEG] = "android.jpeg", + [ANDROID_LENS] = "android.lens", + [ANDROID_LENS_INFO] = "android.lens.info", + [ANDROID_NOISE_REDUCTION] = "android.noiseReduction", + [ANDROID_QUIRKS] = "android.quirks", + [ANDROID_REQUEST] = "android.request", + [ANDROID_SCALER] = "android.scaler", + [ANDROID_SENSOR] = "android.sensor", + [ANDROID_SENSOR_INFO] = "android.sensor.info", + [ANDROID_SHADING] = "android.shading", + [ANDROID_STATISTICS] = "android.statistics", + [ANDROID_STATISTICS_INFO] = "android.statistics.info", + [ANDROID_TONEMAP] = "android.tonemap", + [ANDROID_LED] = "android.led", + [ANDROID_INFO] = "android.info", + [ANDROID_BLACK_LEVEL] = "android.blackLevel", + [ANDROID_SYNC] = "android.sync", + [ANDROID_REPROCESS] = "android.reprocess", + [ANDROID_DEPTH] = "android.depth", + [ANDROID_LOGICAL_MULTI_CAMERA] = "android.logicalMultiCamera", + [ANDROID_DISTORTION_CORRECTION] + = "android.distortionCorrection", +}; + +unsigned int camera_metadata_section_bounds[ANDROID_SECTION_COUNT][2] = { + [ANDROID_COLOR_CORRECTION] = { ANDROID_COLOR_CORRECTION_START, + ANDROID_COLOR_CORRECTION_END }, + [ANDROID_CONTROL] = { ANDROID_CONTROL_START, + ANDROID_CONTROL_END }, + [ANDROID_DEMOSAIC] = { ANDROID_DEMOSAIC_START, + ANDROID_DEMOSAIC_END }, + [ANDROID_EDGE] = { ANDROID_EDGE_START, + ANDROID_EDGE_END }, + [ANDROID_FLASH] = { ANDROID_FLASH_START, + ANDROID_FLASH_END }, + [ANDROID_FLASH_INFO] = { ANDROID_FLASH_INFO_START, + ANDROID_FLASH_INFO_END }, + [ANDROID_HOT_PIXEL] = { ANDROID_HOT_PIXEL_START, + ANDROID_HOT_PIXEL_END }, + [ANDROID_JPEG] = { ANDROID_JPEG_START, + ANDROID_JPEG_END }, + [ANDROID_LENS] = { ANDROID_LENS_START, + ANDROID_LENS_END }, + [ANDROID_LENS_INFO] = { ANDROID_LENS_INFO_START, + ANDROID_LENS_INFO_END }, + [ANDROID_NOISE_REDUCTION] = { ANDROID_NOISE_REDUCTION_START, + ANDROID_NOISE_REDUCTION_END }, + [ANDROID_QUIRKS] = { ANDROID_QUIRKS_START, + ANDROID_QUIRKS_END }, + [ANDROID_REQUEST] = { ANDROID_REQUEST_START, + ANDROID_REQUEST_END }, + [ANDROID_SCALER] = { ANDROID_SCALER_START, + ANDROID_SCALER_END }, + [ANDROID_SENSOR] = { ANDROID_SENSOR_START, + ANDROID_SENSOR_END }, + [ANDROID_SENSOR_INFO] = { ANDROID_SENSOR_INFO_START, + ANDROID_SENSOR_INFO_END }, + [ANDROID_SHADING] = { ANDROID_SHADING_START, + ANDROID_SHADING_END }, + [ANDROID_STATISTICS] = { ANDROID_STATISTICS_START, + ANDROID_STATISTICS_END }, + [ANDROID_STATISTICS_INFO] = { ANDROID_STATISTICS_INFO_START, + ANDROID_STATISTICS_INFO_END }, + [ANDROID_TONEMAP] = { ANDROID_TONEMAP_START, + ANDROID_TONEMAP_END }, + [ANDROID_LED] = { ANDROID_LED_START, + ANDROID_LED_END }, + [ANDROID_INFO] = { ANDROID_INFO_START, + ANDROID_INFO_END }, + [ANDROID_BLACK_LEVEL] = { ANDROID_BLACK_LEVEL_START, + ANDROID_BLACK_LEVEL_END }, + [ANDROID_SYNC] = { ANDROID_SYNC_START, + ANDROID_SYNC_END }, + [ANDROID_REPROCESS] = { ANDROID_REPROCESS_START, + ANDROID_REPROCESS_END }, + [ANDROID_DEPTH] = { ANDROID_DEPTH_START, + ANDROID_DEPTH_END }, + [ANDROID_LOGICAL_MULTI_CAMERA] = { ANDROID_LOGICAL_MULTI_CAMERA_START, + ANDROID_LOGICAL_MULTI_CAMERA_END }, + [ANDROID_DISTORTION_CORRECTION] + = { 
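+      // Bounds are half-open: each _END value is one past the last valid tag,
+      // which is exactly how the lookups in camera_metadata.c test validity.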
ANDROID_DISTORTION_CORRECTION_START, + ANDROID_DISTORTION_CORRECTION_END }, +}; + +static tag_info_t android_color_correction[ANDROID_COLOR_CORRECTION_END - + ANDROID_COLOR_CORRECTION_START] = { + [ ANDROID_COLOR_CORRECTION_MODE - ANDROID_COLOR_CORRECTION_START ] = + { "mode", TYPE_BYTE }, + [ ANDROID_COLOR_CORRECTION_TRANSFORM - ANDROID_COLOR_CORRECTION_START ] = + { "transform", TYPE_RATIONAL + }, + [ ANDROID_COLOR_CORRECTION_GAINS - ANDROID_COLOR_CORRECTION_START ] = + { "gains", TYPE_FLOAT }, + [ ANDROID_COLOR_CORRECTION_ABERRATION_MODE - ANDROID_COLOR_CORRECTION_START ] = + { "aberrationMode", TYPE_BYTE }, + [ ANDROID_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES - ANDROID_COLOR_CORRECTION_START ] = + { "availableAberrationModes", TYPE_BYTE }, +}; + +static tag_info_t android_control[ANDROID_CONTROL_END - + ANDROID_CONTROL_START] = { + [ ANDROID_CONTROL_AE_ANTIBANDING_MODE - ANDROID_CONTROL_START ] = + { "aeAntibandingMode", TYPE_BYTE }, + [ ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION - ANDROID_CONTROL_START ] = + { "aeExposureCompensation", TYPE_INT32 }, + [ ANDROID_CONTROL_AE_LOCK - ANDROID_CONTROL_START ] = + { "aeLock", TYPE_BYTE }, + [ ANDROID_CONTROL_AE_MODE - ANDROID_CONTROL_START ] = + { "aeMode", TYPE_BYTE }, + [ ANDROID_CONTROL_AE_REGIONS - ANDROID_CONTROL_START ] = + { "aeRegions", TYPE_INT32 }, + [ ANDROID_CONTROL_AE_TARGET_FPS_RANGE - ANDROID_CONTROL_START ] = + { "aeTargetFpsRange", TYPE_INT32 }, + [ ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER - ANDROID_CONTROL_START ] = + { "aePrecaptureTrigger", TYPE_BYTE }, + [ ANDROID_CONTROL_AF_MODE - ANDROID_CONTROL_START ] = + { "afMode", TYPE_BYTE }, + [ ANDROID_CONTROL_AF_REGIONS - ANDROID_CONTROL_START ] = + { "afRegions", TYPE_INT32 }, + [ ANDROID_CONTROL_AF_TRIGGER - ANDROID_CONTROL_START ] = + { "afTrigger", TYPE_BYTE }, + [ ANDROID_CONTROL_AWB_LOCK - ANDROID_CONTROL_START ] = + { "awbLock", TYPE_BYTE }, + [ ANDROID_CONTROL_AWB_MODE - ANDROID_CONTROL_START ] = + { "awbMode", TYPE_BYTE }, + [ ANDROID_CONTROL_AWB_REGIONS - ANDROID_CONTROL_START ] = + { "awbRegions", TYPE_INT32 }, + [ ANDROID_CONTROL_CAPTURE_INTENT - ANDROID_CONTROL_START ] = + { "captureIntent", TYPE_BYTE }, + [ ANDROID_CONTROL_EFFECT_MODE - ANDROID_CONTROL_START ] = + { "effectMode", TYPE_BYTE }, + [ ANDROID_CONTROL_MODE - ANDROID_CONTROL_START ] = + { "mode", TYPE_BYTE }, + [ ANDROID_CONTROL_SCENE_MODE - ANDROID_CONTROL_START ] = + { "sceneMode", TYPE_BYTE }, + [ ANDROID_CONTROL_VIDEO_STABILIZATION_MODE - ANDROID_CONTROL_START ] = + { "videoStabilizationMode", TYPE_BYTE }, + [ ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES - ANDROID_CONTROL_START ] = + { "aeAvailableAntibandingModes", TYPE_BYTE }, + [ ANDROID_CONTROL_AE_AVAILABLE_MODES - ANDROID_CONTROL_START ] = + { "aeAvailableModes", TYPE_BYTE }, + [ ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES - ANDROID_CONTROL_START ] = + { "aeAvailableTargetFpsRanges", TYPE_INT32 }, + [ ANDROID_CONTROL_AE_COMPENSATION_RANGE - ANDROID_CONTROL_START ] = + { "aeCompensationRange", TYPE_INT32 }, + [ ANDROID_CONTROL_AE_COMPENSATION_STEP - ANDROID_CONTROL_START ] = + { "aeCompensationStep", TYPE_RATIONAL + }, + [ ANDROID_CONTROL_AF_AVAILABLE_MODES - ANDROID_CONTROL_START ] = + { "afAvailableModes", TYPE_BYTE }, + [ ANDROID_CONTROL_AVAILABLE_EFFECTS - ANDROID_CONTROL_START ] = + { "availableEffects", TYPE_BYTE }, + [ ANDROID_CONTROL_AVAILABLE_SCENE_MODES - ANDROID_CONTROL_START ] = + { "availableSceneModes", TYPE_BYTE }, + [ ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES - ANDROID_CONTROL_START ] = + { 
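+      // As with every list-valued tag, the array length travels in the
+      // entry's count field; the type only fixes the element width.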
"availableVideoStabilizationModes", + TYPE_BYTE }, + [ ANDROID_CONTROL_AWB_AVAILABLE_MODES - ANDROID_CONTROL_START ] = + { "awbAvailableModes", TYPE_BYTE }, + [ ANDROID_CONTROL_MAX_REGIONS - ANDROID_CONTROL_START ] = + { "maxRegions", TYPE_INT32 }, + [ ANDROID_CONTROL_SCENE_MODE_OVERRIDES - ANDROID_CONTROL_START ] = + { "sceneModeOverrides", TYPE_BYTE }, + [ ANDROID_CONTROL_AE_PRECAPTURE_ID - ANDROID_CONTROL_START ] = + { "aePrecaptureId", TYPE_INT32 }, + [ ANDROID_CONTROL_AE_STATE - ANDROID_CONTROL_START ] = + { "aeState", TYPE_BYTE }, + [ ANDROID_CONTROL_AF_STATE - ANDROID_CONTROL_START ] = + { "afState", TYPE_BYTE }, + [ ANDROID_CONTROL_AF_TRIGGER_ID - ANDROID_CONTROL_START ] = + { "afTriggerId", TYPE_INT32 }, + [ ANDROID_CONTROL_AWB_STATE - ANDROID_CONTROL_START ] = + { "awbState", TYPE_BYTE }, + [ ANDROID_CONTROL_AVAILABLE_HIGH_SPEED_VIDEO_CONFIGURATIONS - ANDROID_CONTROL_START ] = + { "availableHighSpeedVideoConfigurations", + TYPE_INT32 }, + [ ANDROID_CONTROL_AE_LOCK_AVAILABLE - ANDROID_CONTROL_START ] = + { "aeLockAvailable", TYPE_BYTE }, + [ ANDROID_CONTROL_AWB_LOCK_AVAILABLE - ANDROID_CONTROL_START ] = + { "awbLockAvailable", TYPE_BYTE }, + [ ANDROID_CONTROL_AVAILABLE_MODES - ANDROID_CONTROL_START ] = + { "availableModes", TYPE_BYTE }, + [ ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST_RANGE - ANDROID_CONTROL_START ] = + { "postRawSensitivityBoostRange", TYPE_INT32 }, + [ ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST - ANDROID_CONTROL_START ] = + { "postRawSensitivityBoost", TYPE_INT32 }, + [ ANDROID_CONTROL_ENABLE_ZSL - ANDROID_CONTROL_START ] = + { "enableZsl", TYPE_BYTE }, + [ ANDROID_CONTROL_AF_SCENE_CHANGE - ANDROID_CONTROL_START ] = + { "afSceneChange", TYPE_BYTE }, +}; + +static tag_info_t android_demosaic[ANDROID_DEMOSAIC_END - + ANDROID_DEMOSAIC_START] = { + [ ANDROID_DEMOSAIC_MODE - ANDROID_DEMOSAIC_START ] = + { "mode", TYPE_BYTE }, +}; + +static tag_info_t android_edge[ANDROID_EDGE_END - + ANDROID_EDGE_START] = { + [ ANDROID_EDGE_MODE - ANDROID_EDGE_START ] = + { "mode", TYPE_BYTE }, + [ ANDROID_EDGE_STRENGTH - ANDROID_EDGE_START ] = + { "strength", TYPE_BYTE }, + [ ANDROID_EDGE_AVAILABLE_EDGE_MODES - ANDROID_EDGE_START ] = + { "availableEdgeModes", TYPE_BYTE }, +}; + +static tag_info_t android_flash[ANDROID_FLASH_END - + ANDROID_FLASH_START] = { + [ ANDROID_FLASH_FIRING_POWER - ANDROID_FLASH_START ] = + { "firingPower", TYPE_BYTE }, + [ ANDROID_FLASH_FIRING_TIME - ANDROID_FLASH_START ] = + { "firingTime", TYPE_INT64 }, + [ ANDROID_FLASH_MODE - ANDROID_FLASH_START ] = + { "mode", TYPE_BYTE }, + [ ANDROID_FLASH_COLOR_TEMPERATURE - ANDROID_FLASH_START ] = + { "colorTemperature", TYPE_BYTE }, + [ ANDROID_FLASH_MAX_ENERGY - ANDROID_FLASH_START ] = + { "maxEnergy", TYPE_BYTE }, + [ ANDROID_FLASH_STATE - ANDROID_FLASH_START ] = + { "state", TYPE_BYTE }, +}; + +static tag_info_t android_flash_info[ANDROID_FLASH_INFO_END - + ANDROID_FLASH_INFO_START] = { + [ ANDROID_FLASH_INFO_AVAILABLE - ANDROID_FLASH_INFO_START ] = + { "available", TYPE_BYTE }, + [ ANDROID_FLASH_INFO_CHARGE_DURATION - ANDROID_FLASH_INFO_START ] = + { "chargeDuration", TYPE_INT64 }, +}; + +static tag_info_t android_hot_pixel[ANDROID_HOT_PIXEL_END - + ANDROID_HOT_PIXEL_START] = { + [ ANDROID_HOT_PIXEL_MODE - ANDROID_HOT_PIXEL_START ] = + { "mode", TYPE_BYTE }, + [ ANDROID_HOT_PIXEL_AVAILABLE_HOT_PIXEL_MODES - ANDROID_HOT_PIXEL_START ] = + { "availableHotPixelModes", TYPE_BYTE }, +}; + +static tag_info_t android_jpeg[ANDROID_JPEG_END - + ANDROID_JPEG_START] = { + [ ANDROID_JPEG_GPS_COORDINATES - ANDROID_JPEG_START ] 
= + { "gpsCoordinates", TYPE_DOUBLE }, + [ ANDROID_JPEG_GPS_PROCESSING_METHOD - ANDROID_JPEG_START ] = + { "gpsProcessingMethod", TYPE_BYTE }, + [ ANDROID_JPEG_GPS_TIMESTAMP - ANDROID_JPEG_START ] = + { "gpsTimestamp", TYPE_INT64 }, + [ ANDROID_JPEG_ORIENTATION - ANDROID_JPEG_START ] = + { "orientation", TYPE_INT32 }, + [ ANDROID_JPEG_QUALITY - ANDROID_JPEG_START ] = + { "quality", TYPE_BYTE }, + [ ANDROID_JPEG_THUMBNAIL_QUALITY - ANDROID_JPEG_START ] = + { "thumbnailQuality", TYPE_BYTE }, + [ ANDROID_JPEG_THUMBNAIL_SIZE - ANDROID_JPEG_START ] = + { "thumbnailSize", TYPE_INT32 }, + [ ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES - ANDROID_JPEG_START ] = + { "availableThumbnailSizes", TYPE_INT32 }, + [ ANDROID_JPEG_MAX_SIZE - ANDROID_JPEG_START ] = + { "maxSize", TYPE_INT32 }, + [ ANDROID_JPEG_SIZE - ANDROID_JPEG_START ] = + { "size", TYPE_INT32 }, +}; + +static tag_info_t android_lens[ANDROID_LENS_END - + ANDROID_LENS_START] = { + [ ANDROID_LENS_APERTURE - ANDROID_LENS_START ] = + { "aperture", TYPE_FLOAT }, + [ ANDROID_LENS_FILTER_DENSITY - ANDROID_LENS_START ] = + { "filterDensity", TYPE_FLOAT }, + [ ANDROID_LENS_FOCAL_LENGTH - ANDROID_LENS_START ] = + { "focalLength", TYPE_FLOAT }, + [ ANDROID_LENS_FOCUS_DISTANCE - ANDROID_LENS_START ] = + { "focusDistance", TYPE_FLOAT }, + [ ANDROID_LENS_OPTICAL_STABILIZATION_MODE - ANDROID_LENS_START ] = + { "opticalStabilizationMode", TYPE_BYTE }, + [ ANDROID_LENS_FACING - ANDROID_LENS_START ] = + { "facing", TYPE_BYTE }, + [ ANDROID_LENS_POSE_ROTATION - ANDROID_LENS_START ] = + { "poseRotation", TYPE_FLOAT }, + [ ANDROID_LENS_POSE_TRANSLATION - ANDROID_LENS_START ] = + { "poseTranslation", TYPE_FLOAT }, + [ ANDROID_LENS_FOCUS_RANGE - ANDROID_LENS_START ] = + { "focusRange", TYPE_FLOAT }, + [ ANDROID_LENS_STATE - ANDROID_LENS_START ] = + { "state", TYPE_BYTE }, + [ ANDROID_LENS_INTRINSIC_CALIBRATION - ANDROID_LENS_START ] = + { "intrinsicCalibration", TYPE_FLOAT }, + [ ANDROID_LENS_RADIAL_DISTORTION - ANDROID_LENS_START ] = + { "radialDistortion", TYPE_FLOAT }, + [ ANDROID_LENS_POSE_REFERENCE - ANDROID_LENS_START ] = + { "poseReference", TYPE_BYTE }, + [ ANDROID_LENS_DISTORTION - ANDROID_LENS_START ] = + { "distortion", TYPE_FLOAT }, +}; + +static tag_info_t android_lens_info[ANDROID_LENS_INFO_END - + ANDROID_LENS_INFO_START] = { + [ ANDROID_LENS_INFO_AVAILABLE_APERTURES - ANDROID_LENS_INFO_START ] = + { "availableApertures", TYPE_FLOAT }, + [ ANDROID_LENS_INFO_AVAILABLE_FILTER_DENSITIES - ANDROID_LENS_INFO_START ] = + { "availableFilterDensities", TYPE_FLOAT }, + [ ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS - ANDROID_LENS_INFO_START ] = + { "availableFocalLengths", TYPE_FLOAT }, + [ ANDROID_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION - ANDROID_LENS_INFO_START ] = + { "availableOpticalStabilization", TYPE_BYTE }, + [ ANDROID_LENS_INFO_HYPERFOCAL_DISTANCE - ANDROID_LENS_INFO_START ] = + { "hyperfocalDistance", TYPE_FLOAT }, + [ ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE - ANDROID_LENS_INFO_START ] = + { "minimumFocusDistance", TYPE_FLOAT }, + [ ANDROID_LENS_INFO_SHADING_MAP_SIZE - ANDROID_LENS_INFO_START ] = + { "shadingMapSize", TYPE_INT32 }, + [ ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION - ANDROID_LENS_INFO_START ] = + { "focusDistanceCalibration", TYPE_BYTE }, +}; + +static tag_info_t android_noise_reduction[ANDROID_NOISE_REDUCTION_END - + ANDROID_NOISE_REDUCTION_START] = { + [ ANDROID_NOISE_REDUCTION_MODE - ANDROID_NOISE_REDUCTION_START ] = + { "mode", TYPE_BYTE }, + [ ANDROID_NOISE_REDUCTION_STRENGTH - ANDROID_NOISE_REDUCTION_START ] = + { "strength", 
TYPE_BYTE }, + [ ANDROID_NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES - ANDROID_NOISE_REDUCTION_START ] = + { "availableNoiseReductionModes", TYPE_BYTE }, +}; + +static tag_info_t android_quirks[ANDROID_QUIRKS_END - + ANDROID_QUIRKS_START] = { + [ ANDROID_QUIRKS_METERING_CROP_REGION - ANDROID_QUIRKS_START ] = + { "meteringCropRegion", TYPE_BYTE }, + [ ANDROID_QUIRKS_TRIGGER_AF_WITH_AUTO - ANDROID_QUIRKS_START ] = + { "triggerAfWithAuto", TYPE_BYTE }, + [ ANDROID_QUIRKS_USE_ZSL_FORMAT - ANDROID_QUIRKS_START ] = + { "useZslFormat", TYPE_BYTE }, + [ ANDROID_QUIRKS_USE_PARTIAL_RESULT - ANDROID_QUIRKS_START ] = + { "usePartialResult", TYPE_BYTE }, + [ ANDROID_QUIRKS_PARTIAL_RESULT - ANDROID_QUIRKS_START ] = + { "partialResult", TYPE_BYTE }, +}; + +static tag_info_t android_request[ANDROID_REQUEST_END - + ANDROID_REQUEST_START] = { + [ ANDROID_REQUEST_FRAME_COUNT - ANDROID_REQUEST_START ] = + { "frameCount", TYPE_INT32 }, + [ ANDROID_REQUEST_ID - ANDROID_REQUEST_START ] = + { "id", TYPE_INT32 }, + [ ANDROID_REQUEST_INPUT_STREAMS - ANDROID_REQUEST_START ] = + { "inputStreams", TYPE_INT32 }, + [ ANDROID_REQUEST_METADATA_MODE - ANDROID_REQUEST_START ] = + { "metadataMode", TYPE_BYTE }, + [ ANDROID_REQUEST_OUTPUT_STREAMS - ANDROID_REQUEST_START ] = + { "outputStreams", TYPE_INT32 }, + [ ANDROID_REQUEST_TYPE - ANDROID_REQUEST_START ] = + { "type", TYPE_BYTE }, + [ ANDROID_REQUEST_MAX_NUM_OUTPUT_STREAMS - ANDROID_REQUEST_START ] = + { "maxNumOutputStreams", TYPE_INT32 }, + [ ANDROID_REQUEST_MAX_NUM_REPROCESS_STREAMS - ANDROID_REQUEST_START ] = + { "maxNumReprocessStreams", TYPE_INT32 }, + [ ANDROID_REQUEST_MAX_NUM_INPUT_STREAMS - ANDROID_REQUEST_START ] = + { "maxNumInputStreams", TYPE_INT32 }, + [ ANDROID_REQUEST_PIPELINE_DEPTH - ANDROID_REQUEST_START ] = + { "pipelineDepth", TYPE_BYTE }, + [ ANDROID_REQUEST_PIPELINE_MAX_DEPTH - ANDROID_REQUEST_START ] = + { "pipelineMaxDepth", TYPE_BYTE }, + [ ANDROID_REQUEST_PARTIAL_RESULT_COUNT - ANDROID_REQUEST_START ] = + { "partialResultCount", TYPE_INT32 }, + [ ANDROID_REQUEST_AVAILABLE_CAPABILITIES - ANDROID_REQUEST_START ] = + { "availableCapabilities", TYPE_BYTE }, + [ ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS - ANDROID_REQUEST_START ] = + { "availableRequestKeys", TYPE_INT32 }, + [ ANDROID_REQUEST_AVAILABLE_RESULT_KEYS - ANDROID_REQUEST_START ] = + { "availableResultKeys", TYPE_INT32 }, + [ ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS - ANDROID_REQUEST_START ] = + { "availableCharacteristicsKeys", TYPE_INT32 }, + [ ANDROID_REQUEST_AVAILABLE_SESSION_KEYS - ANDROID_REQUEST_START ] = + { "availableSessionKeys", TYPE_INT32 }, + [ ANDROID_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS - ANDROID_REQUEST_START ] = + { "availablePhysicalCameraRequestKeys", + TYPE_INT32 }, +}; + +static tag_info_t android_scaler[ANDROID_SCALER_END - + ANDROID_SCALER_START] = { + [ ANDROID_SCALER_CROP_REGION - ANDROID_SCALER_START ] = + { "cropRegion", TYPE_INT32 }, + [ ANDROID_SCALER_AVAILABLE_FORMATS - ANDROID_SCALER_START ] = + { "availableFormats", TYPE_INT32 }, + [ ANDROID_SCALER_AVAILABLE_JPEG_MIN_DURATIONS - ANDROID_SCALER_START ] = + { "availableJpegMinDurations", TYPE_INT64 }, + [ ANDROID_SCALER_AVAILABLE_JPEG_SIZES - ANDROID_SCALER_START ] = + { "availableJpegSizes", TYPE_INT32 }, + [ ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM - ANDROID_SCALER_START ] = + { "availableMaxDigitalZoom", TYPE_FLOAT }, + [ ANDROID_SCALER_AVAILABLE_PROCESSED_MIN_DURATIONS - ANDROID_SCALER_START ] = + { "availableProcessedMinDurations", + TYPE_INT64 }, + [ 
ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES - ANDROID_SCALER_START ] = + { "availableProcessedSizes", TYPE_INT32 }, + [ ANDROID_SCALER_AVAILABLE_RAW_MIN_DURATIONS - ANDROID_SCALER_START ] = + { "availableRawMinDurations", TYPE_INT64 }, + [ ANDROID_SCALER_AVAILABLE_RAW_SIZES - ANDROID_SCALER_START ] = + { "availableRawSizes", TYPE_INT32 }, + [ ANDROID_SCALER_AVAILABLE_INPUT_OUTPUT_FORMATS_MAP - ANDROID_SCALER_START ] = + { "availableInputOutputFormatsMap", + TYPE_INT32 }, + [ ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS - ANDROID_SCALER_START ] = + { "availableStreamConfigurations", TYPE_INT32 }, + [ ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS - ANDROID_SCALER_START ] = + { "availableMinFrameDurations", TYPE_INT64 }, + [ ANDROID_SCALER_AVAILABLE_STALL_DURATIONS - ANDROID_SCALER_START ] = + { "availableStallDurations", TYPE_INT64 }, + [ ANDROID_SCALER_CROPPING_TYPE - ANDROID_SCALER_START ] = + { "croppingType", TYPE_BYTE }, +}; + +static tag_info_t android_sensor[ANDROID_SENSOR_END - + ANDROID_SENSOR_START] = { + [ ANDROID_SENSOR_EXPOSURE_TIME - ANDROID_SENSOR_START ] = + { "exposureTime", TYPE_INT64 }, + [ ANDROID_SENSOR_FRAME_DURATION - ANDROID_SENSOR_START ] = + { "frameDuration", TYPE_INT64 }, + [ ANDROID_SENSOR_SENSITIVITY - ANDROID_SENSOR_START ] = + { "sensitivity", TYPE_INT32 }, + [ ANDROID_SENSOR_REFERENCE_ILLUMINANT1 - ANDROID_SENSOR_START ] = + { "referenceIlluminant1", TYPE_BYTE }, + [ ANDROID_SENSOR_REFERENCE_ILLUMINANT2 - ANDROID_SENSOR_START ] = + { "referenceIlluminant2", TYPE_BYTE }, + [ ANDROID_SENSOR_CALIBRATION_TRANSFORM1 - ANDROID_SENSOR_START ] = + { "calibrationTransform1", TYPE_RATIONAL + }, + [ ANDROID_SENSOR_CALIBRATION_TRANSFORM2 - ANDROID_SENSOR_START ] = + { "calibrationTransform2", TYPE_RATIONAL + }, + [ ANDROID_SENSOR_COLOR_TRANSFORM1 - ANDROID_SENSOR_START ] = + { "colorTransform1", TYPE_RATIONAL + }, + [ ANDROID_SENSOR_COLOR_TRANSFORM2 - ANDROID_SENSOR_START ] = + { "colorTransform2", TYPE_RATIONAL + }, + [ ANDROID_SENSOR_FORWARD_MATRIX1 - ANDROID_SENSOR_START ] = + { "forwardMatrix1", TYPE_RATIONAL + }, + [ ANDROID_SENSOR_FORWARD_MATRIX2 - ANDROID_SENSOR_START ] = + { "forwardMatrix2", TYPE_RATIONAL + }, + [ ANDROID_SENSOR_BASE_GAIN_FACTOR - ANDROID_SENSOR_START ] = + { "baseGainFactor", TYPE_RATIONAL + }, + [ ANDROID_SENSOR_BLACK_LEVEL_PATTERN - ANDROID_SENSOR_START ] = + { "blackLevelPattern", TYPE_INT32 }, + [ ANDROID_SENSOR_MAX_ANALOG_SENSITIVITY - ANDROID_SENSOR_START ] = + { "maxAnalogSensitivity", TYPE_INT32 }, + [ ANDROID_SENSOR_ORIENTATION - ANDROID_SENSOR_START ] = + { "orientation", TYPE_INT32 }, + [ ANDROID_SENSOR_PROFILE_HUE_SAT_MAP_DIMENSIONS - ANDROID_SENSOR_START ] = + { "profileHueSatMapDimensions", TYPE_INT32 }, + [ ANDROID_SENSOR_TIMESTAMP - ANDROID_SENSOR_START ] = + { "timestamp", TYPE_INT64 }, + [ ANDROID_SENSOR_TEMPERATURE - ANDROID_SENSOR_START ] = + { "temperature", TYPE_FLOAT }, + [ ANDROID_SENSOR_NEUTRAL_COLOR_POINT - ANDROID_SENSOR_START ] = + { "neutralColorPoint", TYPE_RATIONAL + }, + [ ANDROID_SENSOR_NOISE_PROFILE - ANDROID_SENSOR_START ] = + { "noiseProfile", TYPE_DOUBLE }, + [ ANDROID_SENSOR_PROFILE_HUE_SAT_MAP - ANDROID_SENSOR_START ] = + { "profileHueSatMap", TYPE_FLOAT }, + [ ANDROID_SENSOR_PROFILE_TONE_CURVE - ANDROID_SENSOR_START ] = + { "profileToneCurve", TYPE_FLOAT }, + [ ANDROID_SENSOR_GREEN_SPLIT - ANDROID_SENSOR_START ] = + { "greenSplit", TYPE_FLOAT }, + [ ANDROID_SENSOR_TEST_PATTERN_DATA - ANDROID_SENSOR_START ] = + { "testPatternData", TYPE_INT32 }, + [ ANDROID_SENSOR_TEST_PATTERN_MODE - ANDROID_SENSOR_START ] = 
+ { "testPatternMode", TYPE_INT32 }, + [ ANDROID_SENSOR_AVAILABLE_TEST_PATTERN_MODES - ANDROID_SENSOR_START ] = + { "availableTestPatternModes", TYPE_INT32 }, + [ ANDROID_SENSOR_ROLLING_SHUTTER_SKEW - ANDROID_SENSOR_START ] = + { "rollingShutterSkew", TYPE_INT64 }, + [ ANDROID_SENSOR_OPTICAL_BLACK_REGIONS - ANDROID_SENSOR_START ] = + { "opticalBlackRegions", TYPE_INT32 }, + [ ANDROID_SENSOR_DYNAMIC_BLACK_LEVEL - ANDROID_SENSOR_START ] = + { "dynamicBlackLevel", TYPE_FLOAT }, + [ ANDROID_SENSOR_DYNAMIC_WHITE_LEVEL - ANDROID_SENSOR_START ] = + { "dynamicWhiteLevel", TYPE_INT32 }, + [ ANDROID_SENSOR_OPAQUE_RAW_SIZE - ANDROID_SENSOR_START ] = + { "opaqueRawSize", TYPE_INT32 }, +}; + +static tag_info_t android_sensor_info[ANDROID_SENSOR_INFO_END - + ANDROID_SENSOR_INFO_START] = { + [ ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE - ANDROID_SENSOR_INFO_START ] = + { "activeArraySize", TYPE_INT32 }, + [ ANDROID_SENSOR_INFO_SENSITIVITY_RANGE - ANDROID_SENSOR_INFO_START ] = + { "sensitivityRange", TYPE_INT32 }, + [ ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT - ANDROID_SENSOR_INFO_START ] = + { "colorFilterArrangement", TYPE_BYTE }, + [ ANDROID_SENSOR_INFO_EXPOSURE_TIME_RANGE - ANDROID_SENSOR_INFO_START ] = + { "exposureTimeRange", TYPE_INT64 }, + [ ANDROID_SENSOR_INFO_MAX_FRAME_DURATION - ANDROID_SENSOR_INFO_START ] = + { "maxFrameDuration", TYPE_INT64 }, + [ ANDROID_SENSOR_INFO_PHYSICAL_SIZE - ANDROID_SENSOR_INFO_START ] = + { "physicalSize", TYPE_FLOAT }, + [ ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE - ANDROID_SENSOR_INFO_START ] = + { "pixelArraySize", TYPE_INT32 }, + [ ANDROID_SENSOR_INFO_WHITE_LEVEL - ANDROID_SENSOR_INFO_START ] = + { "whiteLevel", TYPE_INT32 }, + [ ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE - ANDROID_SENSOR_INFO_START ] = + { "timestampSource", TYPE_BYTE }, + [ ANDROID_SENSOR_INFO_LENS_SHADING_APPLIED - ANDROID_SENSOR_INFO_START ] = + { "lensShadingApplied", TYPE_BYTE }, + [ ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE - ANDROID_SENSOR_INFO_START ] = + { "preCorrectionActiveArraySize", TYPE_INT32 }, +}; + +static tag_info_t android_shading[ANDROID_SHADING_END - + ANDROID_SHADING_START] = { + [ ANDROID_SHADING_MODE - ANDROID_SHADING_START ] = + { "mode", TYPE_BYTE }, + [ ANDROID_SHADING_STRENGTH - ANDROID_SHADING_START ] = + { "strength", TYPE_BYTE }, + [ ANDROID_SHADING_AVAILABLE_MODES - ANDROID_SHADING_START ] = + { "availableModes", TYPE_BYTE }, +}; + +static tag_info_t android_statistics[ANDROID_STATISTICS_END - + ANDROID_STATISTICS_START] = { + [ ANDROID_STATISTICS_FACE_DETECT_MODE - ANDROID_STATISTICS_START ] = + { "faceDetectMode", TYPE_BYTE }, + [ ANDROID_STATISTICS_HISTOGRAM_MODE - ANDROID_STATISTICS_START ] = + { "histogramMode", TYPE_BYTE }, + [ ANDROID_STATISTICS_SHARPNESS_MAP_MODE - ANDROID_STATISTICS_START ] = + { "sharpnessMapMode", TYPE_BYTE }, + [ ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE - ANDROID_STATISTICS_START ] = + { "hotPixelMapMode", TYPE_BYTE }, + [ ANDROID_STATISTICS_FACE_IDS - ANDROID_STATISTICS_START ] = + { "faceIds", TYPE_INT32 }, + [ ANDROID_STATISTICS_FACE_LANDMARKS - ANDROID_STATISTICS_START ] = + { "faceLandmarks", TYPE_INT32 }, + [ ANDROID_STATISTICS_FACE_RECTANGLES - ANDROID_STATISTICS_START ] = + { "faceRectangles", TYPE_INT32 }, + [ ANDROID_STATISTICS_FACE_SCORES - ANDROID_STATISTICS_START ] = + { "faceScores", TYPE_BYTE }, + [ ANDROID_STATISTICS_HISTOGRAM - ANDROID_STATISTICS_START ] = + { "histogram", TYPE_INT32 }, + [ ANDROID_STATISTICS_SHARPNESS_MAP - ANDROID_STATISTICS_START ] = + { "sharpnessMap", TYPE_INT32 }, + [ 
ANDROID_STATISTICS_LENS_SHADING_CORRECTION_MAP - ANDROID_STATISTICS_START ] = + { "lensShadingCorrectionMap", TYPE_BYTE }, + [ ANDROID_STATISTICS_LENS_SHADING_MAP - ANDROID_STATISTICS_START ] = + { "lensShadingMap", TYPE_FLOAT }, + [ ANDROID_STATISTICS_PREDICTED_COLOR_GAINS - ANDROID_STATISTICS_START ] = + { "predictedColorGains", TYPE_FLOAT }, + [ ANDROID_STATISTICS_PREDICTED_COLOR_TRANSFORM - ANDROID_STATISTICS_START ] = + { "predictedColorTransform", TYPE_RATIONAL + }, + [ ANDROID_STATISTICS_SCENE_FLICKER - ANDROID_STATISTICS_START ] = + { "sceneFlicker", TYPE_BYTE }, + [ ANDROID_STATISTICS_HOT_PIXEL_MAP - ANDROID_STATISTICS_START ] = + { "hotPixelMap", TYPE_INT32 }, + [ ANDROID_STATISTICS_LENS_SHADING_MAP_MODE - ANDROID_STATISTICS_START ] = + { "lensShadingMapMode", TYPE_BYTE }, + [ ANDROID_STATISTICS_OIS_DATA_MODE - ANDROID_STATISTICS_START ] = + { "oisDataMode", TYPE_BYTE }, + [ ANDROID_STATISTICS_OIS_TIMESTAMPS - ANDROID_STATISTICS_START ] = + { "oisTimestamps", TYPE_INT64 }, + [ ANDROID_STATISTICS_OIS_X_SHIFTS - ANDROID_STATISTICS_START ] = + { "oisXShifts", TYPE_FLOAT }, + [ ANDROID_STATISTICS_OIS_Y_SHIFTS - ANDROID_STATISTICS_START ] = + { "oisYShifts", TYPE_FLOAT }, +}; + +static tag_info_t android_statistics_info[ANDROID_STATISTICS_INFO_END - + ANDROID_STATISTICS_INFO_START] = { + [ ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES - ANDROID_STATISTICS_INFO_START ] = + { "availableFaceDetectModes", TYPE_BYTE }, + [ ANDROID_STATISTICS_INFO_HISTOGRAM_BUCKET_COUNT - ANDROID_STATISTICS_INFO_START ] = + { "histogramBucketCount", TYPE_INT32 }, + [ ANDROID_STATISTICS_INFO_MAX_FACE_COUNT - ANDROID_STATISTICS_INFO_START ] = + { "maxFaceCount", TYPE_INT32 }, + [ ANDROID_STATISTICS_INFO_MAX_HISTOGRAM_COUNT - ANDROID_STATISTICS_INFO_START ] = + { "maxHistogramCount", TYPE_INT32 }, + [ ANDROID_STATISTICS_INFO_MAX_SHARPNESS_MAP_VALUE - ANDROID_STATISTICS_INFO_START ] = + { "maxSharpnessMapValue", TYPE_INT32 }, + [ ANDROID_STATISTICS_INFO_SHARPNESS_MAP_SIZE - ANDROID_STATISTICS_INFO_START ] = + { "sharpnessMapSize", TYPE_INT32 }, + [ ANDROID_STATISTICS_INFO_AVAILABLE_HOT_PIXEL_MAP_MODES - ANDROID_STATISTICS_INFO_START ] = + { "availableHotPixelMapModes", TYPE_BYTE }, + [ ANDROID_STATISTICS_INFO_AVAILABLE_LENS_SHADING_MAP_MODES - ANDROID_STATISTICS_INFO_START ] = + { "availableLensShadingMapModes", TYPE_BYTE }, + [ ANDROID_STATISTICS_INFO_AVAILABLE_OIS_DATA_MODES - ANDROID_STATISTICS_INFO_START ] = + { "availableOisDataModes", TYPE_BYTE }, +}; + +static tag_info_t android_tonemap[ANDROID_TONEMAP_END - + ANDROID_TONEMAP_START] = { + [ ANDROID_TONEMAP_CURVE_BLUE - ANDROID_TONEMAP_START ] = + { "curveBlue", TYPE_FLOAT }, + [ ANDROID_TONEMAP_CURVE_GREEN - ANDROID_TONEMAP_START ] = + { "curveGreen", TYPE_FLOAT }, + [ ANDROID_TONEMAP_CURVE_RED - ANDROID_TONEMAP_START ] = + { "curveRed", TYPE_FLOAT }, + [ ANDROID_TONEMAP_MODE - ANDROID_TONEMAP_START ] = + { "mode", TYPE_BYTE }, + [ ANDROID_TONEMAP_MAX_CURVE_POINTS - ANDROID_TONEMAP_START ] = + { "maxCurvePoints", TYPE_INT32 }, + [ ANDROID_TONEMAP_AVAILABLE_TONE_MAP_MODES - ANDROID_TONEMAP_START ] = + { "availableToneMapModes", TYPE_BYTE }, + [ ANDROID_TONEMAP_GAMMA - ANDROID_TONEMAP_START ] = + { "gamma", TYPE_FLOAT }, + [ ANDROID_TONEMAP_PRESET_CURVE - ANDROID_TONEMAP_START ] = + { "presetCurve", TYPE_BYTE }, +}; + +static tag_info_t android_led[ANDROID_LED_END - + ANDROID_LED_START] = { + [ ANDROID_LED_TRANSMIT - ANDROID_LED_START ] = + { "transmit", TYPE_BYTE }, + [ ANDROID_LED_AVAILABLE_LEDS - ANDROID_LED_START ] = + { "availableLeds", 
TYPE_BYTE }, +}; + +static tag_info_t android_info[ANDROID_INFO_END - + ANDROID_INFO_START] = { + [ ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL - ANDROID_INFO_START ] = + { "supportedHardwareLevel", TYPE_BYTE }, + [ ANDROID_INFO_VERSION - ANDROID_INFO_START ] = + { "version", TYPE_BYTE }, +}; + +static tag_info_t android_black_level[ANDROID_BLACK_LEVEL_END - + ANDROID_BLACK_LEVEL_START] = { + [ ANDROID_BLACK_LEVEL_LOCK - ANDROID_BLACK_LEVEL_START ] = + { "lock", TYPE_BYTE }, +}; + +static tag_info_t android_sync[ANDROID_SYNC_END - + ANDROID_SYNC_START] = { + [ ANDROID_SYNC_FRAME_NUMBER - ANDROID_SYNC_START ] = + { "frameNumber", TYPE_INT64 }, + [ ANDROID_SYNC_MAX_LATENCY - ANDROID_SYNC_START ] = + { "maxLatency", TYPE_INT32 }, +}; + +static tag_info_t android_reprocess[ANDROID_REPROCESS_END - + ANDROID_REPROCESS_START] = { + [ ANDROID_REPROCESS_EFFECTIVE_EXPOSURE_FACTOR - ANDROID_REPROCESS_START ] = + { "effectiveExposureFactor", TYPE_FLOAT }, + [ ANDROID_REPROCESS_MAX_CAPTURE_STALL - ANDROID_REPROCESS_START ] = + { "maxCaptureStall", TYPE_INT32 }, +}; + +static tag_info_t android_depth[ANDROID_DEPTH_END - + ANDROID_DEPTH_START] = { + [ ANDROID_DEPTH_MAX_DEPTH_SAMPLES - ANDROID_DEPTH_START ] = + { "maxDepthSamples", TYPE_INT32 }, + [ ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS - ANDROID_DEPTH_START ] = + { "availableDepthStreamConfigurations", + TYPE_INT32 }, + [ ANDROID_DEPTH_AVAILABLE_DEPTH_MIN_FRAME_DURATIONS - ANDROID_DEPTH_START ] = + { "availableDepthMinFrameDurations", + TYPE_INT64 }, + [ ANDROID_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS - ANDROID_DEPTH_START ] = + { "availableDepthStallDurations", TYPE_INT64 }, + [ ANDROID_DEPTH_DEPTH_IS_EXCLUSIVE - ANDROID_DEPTH_START ] = + { "depthIsExclusive", TYPE_BYTE }, +}; + +static tag_info_t android_logical_multi_camera[ANDROID_LOGICAL_MULTI_CAMERA_END - + ANDROID_LOGICAL_MULTI_CAMERA_START] = { + [ ANDROID_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS - ANDROID_LOGICAL_MULTI_CAMERA_START ] = + { "physicalIds", TYPE_BYTE }, + [ ANDROID_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE - ANDROID_LOGICAL_MULTI_CAMERA_START ] = + { "sensorSyncType", TYPE_BYTE }, +}; + +static tag_info_t android_distortion_correction[ANDROID_DISTORTION_CORRECTION_END - + ANDROID_DISTORTION_CORRECTION_START] = { + [ ANDROID_DISTORTION_CORRECTION_MODE - ANDROID_DISTORTION_CORRECTION_START ] = + { "mode", TYPE_BYTE }, + [ ANDROID_DISTORTION_CORRECTION_AVAILABLE_MODES - ANDROID_DISTORTION_CORRECTION_START ] = + { "availableModes", TYPE_BYTE }, +}; + + +tag_info_t *tag_info[ANDROID_SECTION_COUNT] = { + android_color_correction, + android_control, + android_demosaic, + android_edge, + android_flash, + android_flash_info, + android_hot_pixel, + android_jpeg, + android_lens, + android_lens_info, + android_noise_reduction, + android_quirks, + android_request, + android_scaler, + android_sensor, + android_sensor_info, + android_shading, + android_statistics, + android_statistics_info, + android_tonemap, + android_led, + android_info, + android_black_level, + android_sync, + android_reprocess, + android_depth, + android_logical_multi_camera, + android_distortion_correction, +}; + +int camera_metadata_enum_snprint(uint32_t tag, + uint32_t value, + char *dst, + size_t size) { + const char *msg = "error: not an enum"; + int ret = -1; + + switch(tag) { + case ANDROID_COLOR_CORRECTION_MODE: { + switch (value) { + case ANDROID_COLOR_CORRECTION_MODE_TRANSFORM_MATRIX: + msg = "TRANSFORM_MATRIX"; + ret = 0; + break; + case ANDROID_COLOR_CORRECTION_MODE_FAST: + msg = "FAST"; + ret = 0; + break; + case 
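+            /*
+             * Usage sketch (illustrative): print_data() in camera_metadata.c
+             * calls this once per value and falls back to the raw number when
+             * the return is not OK:
+             *
+             *   char buf[CAMERA_METADATA_ENUM_STRING_MAX_SIZE];
+             *   if (camera_metadata_enum_snprint(tag, v, buf, sizeof(buf)) == OK)
+             *       dprintf(fd, "%s ", buf);
+             */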
ANDROID_COLOR_CORRECTION_MODE_HIGH_QUALITY: + msg = "HIGH_QUALITY"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_COLOR_CORRECTION_TRANSFORM: { + break; + } + case ANDROID_COLOR_CORRECTION_GAINS: { + break; + } + case ANDROID_COLOR_CORRECTION_ABERRATION_MODE: { + switch (value) { + case ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF: + msg = "OFF"; + ret = 0; + break; + case ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST: + msg = "FAST"; + ret = 0; + break; + case ANDROID_COLOR_CORRECTION_ABERRATION_MODE_HIGH_QUALITY: + msg = "HIGH_QUALITY"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES: { + break; + } + + case ANDROID_CONTROL_AE_ANTIBANDING_MODE: { + switch (value) { + case ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF: + msg = "OFF"; + ret = 0; + break; + case ANDROID_CONTROL_AE_ANTIBANDING_MODE_50HZ: + msg = "50HZ"; + ret = 0; + break; + case ANDROID_CONTROL_AE_ANTIBANDING_MODE_60HZ: + msg = "60HZ"; + ret = 0; + break; + case ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO: + msg = "AUTO"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION: { + break; + } + case ANDROID_CONTROL_AE_LOCK: { + switch (value) { + case ANDROID_CONTROL_AE_LOCK_OFF: + msg = "OFF"; + ret = 0; + break; + case ANDROID_CONTROL_AE_LOCK_ON: + msg = "ON"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_CONTROL_AE_MODE: { + switch (value) { + case ANDROID_CONTROL_AE_MODE_OFF: + msg = "OFF"; + ret = 0; + break; + case ANDROID_CONTROL_AE_MODE_ON: + msg = "ON"; + ret = 0; + break; + case ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH: + msg = "ON_AUTO_FLASH"; + ret = 0; + break; + case ANDROID_CONTROL_AE_MODE_ON_ALWAYS_FLASH: + msg = "ON_ALWAYS_FLASH"; + ret = 0; + break; + case ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE: + msg = "ON_AUTO_FLASH_REDEYE"; + ret = 0; + break; + case ANDROID_CONTROL_AE_MODE_ON_EXTERNAL_FLASH: + msg = "ON_EXTERNAL_FLASH"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_CONTROL_AE_REGIONS: { + break; + } + case ANDROID_CONTROL_AE_TARGET_FPS_RANGE: { + break; + } + case ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER: { + switch (value) { + case ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE: + msg = "IDLE"; + ret = 0; + break; + case ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_START: + msg = "START"; + ret = 0; + break; + case ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_CANCEL: + msg = "CANCEL"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_CONTROL_AF_MODE: { + switch (value) { + case ANDROID_CONTROL_AF_MODE_OFF: + msg = "OFF"; + ret = 0; + break; + case ANDROID_CONTROL_AF_MODE_AUTO: + msg = "AUTO"; + ret = 0; + break; + case ANDROID_CONTROL_AF_MODE_MACRO: + msg = "MACRO"; + ret = 0; + break; + case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO: + msg = "CONTINUOUS_VIDEO"; + ret = 0; + break; + case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE: + msg = "CONTINUOUS_PICTURE"; + ret = 0; + break; + case ANDROID_CONTROL_AF_MODE_EDOF: + msg = "EDOF"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_CONTROL_AF_REGIONS: { + break; + } + case ANDROID_CONTROL_AF_TRIGGER: { + switch (value) { + case ANDROID_CONTROL_AF_TRIGGER_IDLE: + msg = "IDLE"; + ret = 0; + break; + case 
ANDROID_CONTROL_AF_TRIGGER_START: + msg = "START"; + ret = 0; + break; + case ANDROID_CONTROL_AF_TRIGGER_CANCEL: + msg = "CANCEL"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_CONTROL_AWB_LOCK: { + switch (value) { + case ANDROID_CONTROL_AWB_LOCK_OFF: + msg = "OFF"; + ret = 0; + break; + case ANDROID_CONTROL_AWB_LOCK_ON: + msg = "ON"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_CONTROL_AWB_MODE: { + switch (value) { + case ANDROID_CONTROL_AWB_MODE_OFF: + msg = "OFF"; + ret = 0; + break; + case ANDROID_CONTROL_AWB_MODE_AUTO: + msg = "AUTO"; + ret = 0; + break; + case ANDROID_CONTROL_AWB_MODE_INCANDESCENT: + msg = "INCANDESCENT"; + ret = 0; + break; + case ANDROID_CONTROL_AWB_MODE_FLUORESCENT: + msg = "FLUORESCENT"; + ret = 0; + break; + case ANDROID_CONTROL_AWB_MODE_WARM_FLUORESCENT: + msg = "WARM_FLUORESCENT"; + ret = 0; + break; + case ANDROID_CONTROL_AWB_MODE_DAYLIGHT: + msg = "DAYLIGHT"; + ret = 0; + break; + case ANDROID_CONTROL_AWB_MODE_CLOUDY_DAYLIGHT: + msg = "CLOUDY_DAYLIGHT"; + ret = 0; + break; + case ANDROID_CONTROL_AWB_MODE_TWILIGHT: + msg = "TWILIGHT"; + ret = 0; + break; + case ANDROID_CONTROL_AWB_MODE_SHADE: + msg = "SHADE"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_CONTROL_AWB_REGIONS: { + break; + } + case ANDROID_CONTROL_CAPTURE_INTENT: { + switch (value) { + case ANDROID_CONTROL_CAPTURE_INTENT_CUSTOM: + msg = "CUSTOM"; + ret = 0; + break; + case ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW: + msg = "PREVIEW"; + ret = 0; + break; + case ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE: + msg = "STILL_CAPTURE"; + ret = 0; + break; + case ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_RECORD: + msg = "VIDEO_RECORD"; + ret = 0; + break; + case ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_SNAPSHOT: + msg = "VIDEO_SNAPSHOT"; + ret = 0; + break; + case ANDROID_CONTROL_CAPTURE_INTENT_ZERO_SHUTTER_LAG: + msg = "ZERO_SHUTTER_LAG"; + ret = 0; + break; + case ANDROID_CONTROL_CAPTURE_INTENT_MANUAL: + msg = "MANUAL"; + ret = 0; + break; + case ANDROID_CONTROL_CAPTURE_INTENT_MOTION_TRACKING: + msg = "MOTION_TRACKING"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_CONTROL_EFFECT_MODE: { + switch (value) { + case ANDROID_CONTROL_EFFECT_MODE_OFF: + msg = "OFF"; + ret = 0; + break; + case ANDROID_CONTROL_EFFECT_MODE_MONO: + msg = "MONO"; + ret = 0; + break; + case ANDROID_CONTROL_EFFECT_MODE_NEGATIVE: + msg = "NEGATIVE"; + ret = 0; + break; + case ANDROID_CONTROL_EFFECT_MODE_SOLARIZE: + msg = "SOLARIZE"; + ret = 0; + break; + case ANDROID_CONTROL_EFFECT_MODE_SEPIA: + msg = "SEPIA"; + ret = 0; + break; + case ANDROID_CONTROL_EFFECT_MODE_POSTERIZE: + msg = "POSTERIZE"; + ret = 0; + break; + case ANDROID_CONTROL_EFFECT_MODE_WHITEBOARD: + msg = "WHITEBOARD"; + ret = 0; + break; + case ANDROID_CONTROL_EFFECT_MODE_BLACKBOARD: + msg = "BLACKBOARD"; + ret = 0; + break; + case ANDROID_CONTROL_EFFECT_MODE_AQUA: + msg = "AQUA"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_CONTROL_MODE: { + switch (value) { + case ANDROID_CONTROL_MODE_OFF: + msg = "OFF"; + ret = 0; + break; + case ANDROID_CONTROL_MODE_AUTO: + msg = "AUTO"; + ret = 0; + break; + case ANDROID_CONTROL_MODE_USE_SCENE_MODE: + msg = "USE_SCENE_MODE"; + ret = 0; + break; + case ANDROID_CONTROL_MODE_OFF_KEEP_STATE: + msg = "OFF_KEEP_STATE"; + ret = 0; + break; + default: + 
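+                // leaves ret == -1, so print_data() falls back to printing
+                // the raw numeric value instead of an enum name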
msg = "error: enum value out of range"; + } + break; + } + case ANDROID_CONTROL_SCENE_MODE: { + switch (value) { + case ANDROID_CONTROL_SCENE_MODE_DISABLED: + msg = "DISABLED"; + ret = 0; + break; + case ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY: + msg = "FACE_PRIORITY"; + ret = 0; + break; + case ANDROID_CONTROL_SCENE_MODE_ACTION: + msg = "ACTION"; + ret = 0; + break; + case ANDROID_CONTROL_SCENE_MODE_PORTRAIT: + msg = "PORTRAIT"; + ret = 0; + break; + case ANDROID_CONTROL_SCENE_MODE_LANDSCAPE: + msg = "LANDSCAPE"; + ret = 0; + break; + case ANDROID_CONTROL_SCENE_MODE_NIGHT: + msg = "NIGHT"; + ret = 0; + break; + case ANDROID_CONTROL_SCENE_MODE_NIGHT_PORTRAIT: + msg = "NIGHT_PORTRAIT"; + ret = 0; + break; + case ANDROID_CONTROL_SCENE_MODE_THEATRE: + msg = "THEATRE"; + ret = 0; + break; + case ANDROID_CONTROL_SCENE_MODE_BEACH: + msg = "BEACH"; + ret = 0; + break; + case ANDROID_CONTROL_SCENE_MODE_SNOW: + msg = "SNOW"; + ret = 0; + break; + case ANDROID_CONTROL_SCENE_MODE_SUNSET: + msg = "SUNSET"; + ret = 0; + break; + case ANDROID_CONTROL_SCENE_MODE_STEADYPHOTO: + msg = "STEADYPHOTO"; + ret = 0; + break; + case ANDROID_CONTROL_SCENE_MODE_FIREWORKS: + msg = "FIREWORKS"; + ret = 0; + break; + case ANDROID_CONTROL_SCENE_MODE_SPORTS: + msg = "SPORTS"; + ret = 0; + break; + case ANDROID_CONTROL_SCENE_MODE_PARTY: + msg = "PARTY"; + ret = 0; + break; + case ANDROID_CONTROL_SCENE_MODE_CANDLELIGHT: + msg = "CANDLELIGHT"; + ret = 0; + break; + case ANDROID_CONTROL_SCENE_MODE_BARCODE: + msg = "BARCODE"; + ret = 0; + break; + case ANDROID_CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO: + msg = "HIGH_SPEED_VIDEO"; + ret = 0; + break; + case ANDROID_CONTROL_SCENE_MODE_HDR: + msg = "HDR"; + ret = 0; + break; + case ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY_LOW_LIGHT: + msg = "FACE_PRIORITY_LOW_LIGHT"; + ret = 0; + break; + case ANDROID_CONTROL_SCENE_MODE_DEVICE_CUSTOM_START: + msg = "DEVICE_CUSTOM_START"; + ret = 0; + break; + case ANDROID_CONTROL_SCENE_MODE_DEVICE_CUSTOM_END: + msg = "DEVICE_CUSTOM_END"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_CONTROL_VIDEO_STABILIZATION_MODE: { + switch (value) { + case ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF: + msg = "OFF"; + ret = 0; + break; + case ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_ON: + msg = "ON"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES: { + break; + } + case ANDROID_CONTROL_AE_AVAILABLE_MODES: { + break; + } + case ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES: { + break; + } + case ANDROID_CONTROL_AE_COMPENSATION_RANGE: { + break; + } + case ANDROID_CONTROL_AE_COMPENSATION_STEP: { + break; + } + case ANDROID_CONTROL_AF_AVAILABLE_MODES: { + break; + } + case ANDROID_CONTROL_AVAILABLE_EFFECTS: { + break; + } + case ANDROID_CONTROL_AVAILABLE_SCENE_MODES: { + break; + } + case ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES: { + break; + } + case ANDROID_CONTROL_AWB_AVAILABLE_MODES: { + break; + } + case ANDROID_CONTROL_MAX_REGIONS: { + break; + } + case ANDROID_CONTROL_SCENE_MODE_OVERRIDES: { + break; + } + case ANDROID_CONTROL_AE_PRECAPTURE_ID: { + break; + } + case ANDROID_CONTROL_AE_STATE: { + switch (value) { + case ANDROID_CONTROL_AE_STATE_INACTIVE: + msg = "INACTIVE"; + ret = 0; + break; + case ANDROID_CONTROL_AE_STATE_SEARCHING: + msg = "SEARCHING"; + ret = 0; + break; + case ANDROID_CONTROL_AE_STATE_CONVERGED: + msg = "CONVERGED"; + ret = 0; + break; + case 
ANDROID_CONTROL_AE_STATE_LOCKED: + msg = "LOCKED"; + ret = 0; + break; + case ANDROID_CONTROL_AE_STATE_FLASH_REQUIRED: + msg = "FLASH_REQUIRED"; + ret = 0; + break; + case ANDROID_CONTROL_AE_STATE_PRECAPTURE: + msg = "PRECAPTURE"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_CONTROL_AF_STATE: { + switch (value) { + case ANDROID_CONTROL_AF_STATE_INACTIVE: + msg = "INACTIVE"; + ret = 0; + break; + case ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN: + msg = "PASSIVE_SCAN"; + ret = 0; + break; + case ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED: + msg = "PASSIVE_FOCUSED"; + ret = 0; + break; + case ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN: + msg = "ACTIVE_SCAN"; + ret = 0; + break; + case ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED: + msg = "FOCUSED_LOCKED"; + ret = 0; + break; + case ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED: + msg = "NOT_FOCUSED_LOCKED"; + ret = 0; + break; + case ANDROID_CONTROL_AF_STATE_PASSIVE_UNFOCUSED: + msg = "PASSIVE_UNFOCUSED"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_CONTROL_AF_TRIGGER_ID: { + break; + } + case ANDROID_CONTROL_AWB_STATE: { + switch (value) { + case ANDROID_CONTROL_AWB_STATE_INACTIVE: + msg = "INACTIVE"; + ret = 0; + break; + case ANDROID_CONTROL_AWB_STATE_SEARCHING: + msg = "SEARCHING"; + ret = 0; + break; + case ANDROID_CONTROL_AWB_STATE_CONVERGED: + msg = "CONVERGED"; + ret = 0; + break; + case ANDROID_CONTROL_AWB_STATE_LOCKED: + msg = "LOCKED"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_CONTROL_AVAILABLE_HIGH_SPEED_VIDEO_CONFIGURATIONS: { + break; + } + case ANDROID_CONTROL_AE_LOCK_AVAILABLE: { + switch (value) { + case ANDROID_CONTROL_AE_LOCK_AVAILABLE_FALSE: + msg = "FALSE"; + ret = 0; + break; + case ANDROID_CONTROL_AE_LOCK_AVAILABLE_TRUE: + msg = "TRUE"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_CONTROL_AWB_LOCK_AVAILABLE: { + switch (value) { + case ANDROID_CONTROL_AWB_LOCK_AVAILABLE_FALSE: + msg = "FALSE"; + ret = 0; + break; + case ANDROID_CONTROL_AWB_LOCK_AVAILABLE_TRUE: + msg = "TRUE"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_CONTROL_AVAILABLE_MODES: { + break; + } + case ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST_RANGE: { + break; + } + case ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST: { + break; + } + case ANDROID_CONTROL_ENABLE_ZSL: { + switch (value) { + case ANDROID_CONTROL_ENABLE_ZSL_FALSE: + msg = "FALSE"; + ret = 0; + break; + case ANDROID_CONTROL_ENABLE_ZSL_TRUE: + msg = "TRUE"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_CONTROL_AF_SCENE_CHANGE: { + switch (value) { + case ANDROID_CONTROL_AF_SCENE_CHANGE_NOT_DETECTED: + msg = "NOT_DETECTED"; + ret = 0; + break; + case ANDROID_CONTROL_AF_SCENE_CHANGE_DETECTED: + msg = "DETECTED"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + + case ANDROID_DEMOSAIC_MODE: { + switch (value) { + case ANDROID_DEMOSAIC_MODE_FAST: + msg = "FAST"; + ret = 0; + break; + case ANDROID_DEMOSAIC_MODE_HIGH_QUALITY: + msg = "HIGH_QUALITY"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + + case ANDROID_EDGE_MODE: { + switch (value) { + case ANDROID_EDGE_MODE_OFF: + msg = "OFF"; + ret = 0; + break; + case ANDROID_EDGE_MODE_FAST: + msg = "FAST"; + ret = 0; + break; + case 
ANDROID_EDGE_MODE_HIGH_QUALITY: + msg = "HIGH_QUALITY"; + ret = 0; + break; + case ANDROID_EDGE_MODE_ZERO_SHUTTER_LAG: + msg = "ZERO_SHUTTER_LAG"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_EDGE_STRENGTH: { + break; + } + case ANDROID_EDGE_AVAILABLE_EDGE_MODES: { + break; + } + + case ANDROID_FLASH_FIRING_POWER: { + break; + } + case ANDROID_FLASH_FIRING_TIME: { + break; + } + case ANDROID_FLASH_MODE: { + switch (value) { + case ANDROID_FLASH_MODE_OFF: + msg = "OFF"; + ret = 0; + break; + case ANDROID_FLASH_MODE_SINGLE: + msg = "SINGLE"; + ret = 0; + break; + case ANDROID_FLASH_MODE_TORCH: + msg = "TORCH"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_FLASH_COLOR_TEMPERATURE: { + break; + } + case ANDROID_FLASH_MAX_ENERGY: { + break; + } + case ANDROID_FLASH_STATE: { + switch (value) { + case ANDROID_FLASH_STATE_UNAVAILABLE: + msg = "UNAVAILABLE"; + ret = 0; + break; + case ANDROID_FLASH_STATE_CHARGING: + msg = "CHARGING"; + ret = 0; + break; + case ANDROID_FLASH_STATE_READY: + msg = "READY"; + ret = 0; + break; + case ANDROID_FLASH_STATE_FIRED: + msg = "FIRED"; + ret = 0; + break; + case ANDROID_FLASH_STATE_PARTIAL: + msg = "PARTIAL"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + + case ANDROID_FLASH_INFO_AVAILABLE: { + switch (value) { + case ANDROID_FLASH_INFO_AVAILABLE_FALSE: + msg = "FALSE"; + ret = 0; + break; + case ANDROID_FLASH_INFO_AVAILABLE_TRUE: + msg = "TRUE"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_FLASH_INFO_CHARGE_DURATION: { + break; + } + + case ANDROID_HOT_PIXEL_MODE: { + switch (value) { + case ANDROID_HOT_PIXEL_MODE_OFF: + msg = "OFF"; + ret = 0; + break; + case ANDROID_HOT_PIXEL_MODE_FAST: + msg = "FAST"; + ret = 0; + break; + case ANDROID_HOT_PIXEL_MODE_HIGH_QUALITY: + msg = "HIGH_QUALITY"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_HOT_PIXEL_AVAILABLE_HOT_PIXEL_MODES: { + break; + } + + case ANDROID_JPEG_GPS_COORDINATES: { + break; + } + case ANDROID_JPEG_GPS_PROCESSING_METHOD: { + break; + } + case ANDROID_JPEG_GPS_TIMESTAMP: { + break; + } + case ANDROID_JPEG_ORIENTATION: { + break; + } + case ANDROID_JPEG_QUALITY: { + break; + } + case ANDROID_JPEG_THUMBNAIL_QUALITY: { + break; + } + case ANDROID_JPEG_THUMBNAIL_SIZE: { + break; + } + case ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES: { + break; + } + case ANDROID_JPEG_MAX_SIZE: { + break; + } + case ANDROID_JPEG_SIZE: { + break; + } + + case ANDROID_LENS_APERTURE: { + break; + } + case ANDROID_LENS_FILTER_DENSITY: { + break; + } + case ANDROID_LENS_FOCAL_LENGTH: { + break; + } + case ANDROID_LENS_FOCUS_DISTANCE: { + break; + } + case ANDROID_LENS_OPTICAL_STABILIZATION_MODE: { + switch (value) { + case ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF: + msg = "OFF"; + ret = 0; + break; + case ANDROID_LENS_OPTICAL_STABILIZATION_MODE_ON: + msg = "ON"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_LENS_FACING: { + switch (value) { + case ANDROID_LENS_FACING_FRONT: + msg = "FRONT"; + ret = 0; + break; + case ANDROID_LENS_FACING_BACK: + msg = "BACK"; + ret = 0; + break; + case ANDROID_LENS_FACING_EXTERNAL: + msg = "EXTERNAL"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_LENS_POSE_ROTATION: { + break; + } + case 
ANDROID_LENS_POSE_TRANSLATION: { + break; + } + case ANDROID_LENS_FOCUS_RANGE: { + break; + } + case ANDROID_LENS_STATE: { + switch (value) { + case ANDROID_LENS_STATE_STATIONARY: + msg = "STATIONARY"; + ret = 0; + break; + case ANDROID_LENS_STATE_MOVING: + msg = "MOVING"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_LENS_INTRINSIC_CALIBRATION: { + break; + } + case ANDROID_LENS_RADIAL_DISTORTION: { + break; + } + case ANDROID_LENS_POSE_REFERENCE: { + switch (value) { + case ANDROID_LENS_POSE_REFERENCE_PRIMARY_CAMERA: + msg = "PRIMARY_CAMERA"; + ret = 0; + break; + case ANDROID_LENS_POSE_REFERENCE_GYROSCOPE: + msg = "GYROSCOPE"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_LENS_DISTORTION: { + break; + } + + case ANDROID_LENS_INFO_AVAILABLE_APERTURES: { + break; + } + case ANDROID_LENS_INFO_AVAILABLE_FILTER_DENSITIES: { + break; + } + case ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS: { + break; + } + case ANDROID_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION: { + break; + } + case ANDROID_LENS_INFO_HYPERFOCAL_DISTANCE: { + break; + } + case ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE: { + break; + } + case ANDROID_LENS_INFO_SHADING_MAP_SIZE: { + break; + } + case ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION: { + switch (value) { + case ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION_UNCALIBRATED: + msg = "UNCALIBRATED"; + ret = 0; + break; + case ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION_APPROXIMATE: + msg = "APPROXIMATE"; + ret = 0; + break; + case ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION_CALIBRATED: + msg = "CALIBRATED"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + + case ANDROID_NOISE_REDUCTION_MODE: { + switch (value) { + case ANDROID_NOISE_REDUCTION_MODE_OFF: + msg = "OFF"; + ret = 0; + break; + case ANDROID_NOISE_REDUCTION_MODE_FAST: + msg = "FAST"; + ret = 0; + break; + case ANDROID_NOISE_REDUCTION_MODE_HIGH_QUALITY: + msg = "HIGH_QUALITY"; + ret = 0; + break; + case ANDROID_NOISE_REDUCTION_MODE_MINIMAL: + msg = "MINIMAL"; + ret = 0; + break; + case ANDROID_NOISE_REDUCTION_MODE_ZERO_SHUTTER_LAG: + msg = "ZERO_SHUTTER_LAG"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_NOISE_REDUCTION_STRENGTH: { + break; + } + case ANDROID_NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES: { + break; + } + + case ANDROID_QUIRKS_METERING_CROP_REGION: { + break; + } + case ANDROID_QUIRKS_TRIGGER_AF_WITH_AUTO: { + break; + } + case ANDROID_QUIRKS_USE_ZSL_FORMAT: { + break; + } + case ANDROID_QUIRKS_USE_PARTIAL_RESULT: { + break; + } + case ANDROID_QUIRKS_PARTIAL_RESULT: { + switch (value) { + case ANDROID_QUIRKS_PARTIAL_RESULT_FINAL: + msg = "FINAL"; + ret = 0; + break; + case ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL: + msg = "PARTIAL"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + + case ANDROID_REQUEST_FRAME_COUNT: { + break; + } + case ANDROID_REQUEST_ID: { + break; + } + case ANDROID_REQUEST_INPUT_STREAMS: { + break; + } + case ANDROID_REQUEST_METADATA_MODE: { + switch (value) { + case ANDROID_REQUEST_METADATA_MODE_NONE: + msg = "NONE"; + ret = 0; + break; + case ANDROID_REQUEST_METADATA_MODE_FULL: + msg = "FULL"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_REQUEST_OUTPUT_STREAMS: { + break; + } + case ANDROID_REQUEST_TYPE: { + switch (value) { + case ANDROID_REQUEST_TYPE_CAPTURE: 
+ msg = "CAPTURE"; + ret = 0; + break; + case ANDROID_REQUEST_TYPE_REPROCESS: + msg = "REPROCESS"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_REQUEST_MAX_NUM_OUTPUT_STREAMS: { + break; + } + case ANDROID_REQUEST_MAX_NUM_REPROCESS_STREAMS: { + break; + } + case ANDROID_REQUEST_MAX_NUM_INPUT_STREAMS: { + break; + } + case ANDROID_REQUEST_PIPELINE_DEPTH: { + break; + } + case ANDROID_REQUEST_PIPELINE_MAX_DEPTH: { + break; + } + case ANDROID_REQUEST_PARTIAL_RESULT_COUNT: { + break; + } + case ANDROID_REQUEST_AVAILABLE_CAPABILITIES: { + switch (value) { + case ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE: + msg = "BACKWARD_COMPATIBLE"; + ret = 0; + break; + case ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR: + msg = "MANUAL_SENSOR"; + ret = 0; + break; + case ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_POST_PROCESSING: + msg = "MANUAL_POST_PROCESSING"; + ret = 0; + break; + case ANDROID_REQUEST_AVAILABLE_CAPABILITIES_RAW: + msg = "RAW"; + ret = 0; + break; + case ANDROID_REQUEST_AVAILABLE_CAPABILITIES_PRIVATE_REPROCESSING: + msg = "PRIVATE_REPROCESSING"; + ret = 0; + break; + case ANDROID_REQUEST_AVAILABLE_CAPABILITIES_READ_SENSOR_SETTINGS: + msg = "READ_SENSOR_SETTINGS"; + ret = 0; + break; + case ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BURST_CAPTURE: + msg = "BURST_CAPTURE"; + ret = 0; + break; + case ANDROID_REQUEST_AVAILABLE_CAPABILITIES_YUV_REPROCESSING: + msg = "YUV_REPROCESSING"; + ret = 0; + break; + case ANDROID_REQUEST_AVAILABLE_CAPABILITIES_DEPTH_OUTPUT: + msg = "DEPTH_OUTPUT"; + ret = 0; + break; + case ANDROID_REQUEST_AVAILABLE_CAPABILITIES_CONSTRAINED_HIGH_SPEED_VIDEO: + msg = "CONSTRAINED_HIGH_SPEED_VIDEO"; + ret = 0; + break; + case ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MOTION_TRACKING: + msg = "MOTION_TRACKING"; + ret = 0; + break; + case ANDROID_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA: + msg = "LOGICAL_MULTI_CAMERA"; + ret = 0; + break; + case ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MONOCHROME: + msg = "MONOCHROME"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS: { + break; + } + case ANDROID_REQUEST_AVAILABLE_RESULT_KEYS: { + break; + } + case ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS: { + break; + } + case ANDROID_REQUEST_AVAILABLE_SESSION_KEYS: { + break; + } + case ANDROID_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS: { + break; + } + + case ANDROID_SCALER_CROP_REGION: { + break; + } + case ANDROID_SCALER_AVAILABLE_FORMATS: { + switch (value) { + case ANDROID_SCALER_AVAILABLE_FORMATS_RAW16: + msg = "RAW16"; + ret = 0; + break; + case ANDROID_SCALER_AVAILABLE_FORMATS_RAW_OPAQUE: + msg = "RAW_OPAQUE"; + ret = 0; + break; + case ANDROID_SCALER_AVAILABLE_FORMATS_YV12: + msg = "YV12"; + ret = 0; + break; + case ANDROID_SCALER_AVAILABLE_FORMATS_YCrCb_420_SP: + msg = "YCrCb_420_SP"; + ret = 0; + break; + case ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED: + msg = "IMPLEMENTATION_DEFINED"; + ret = 0; + break; + case ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888: + msg = "YCbCr_420_888"; + ret = 0; + break; + case ANDROID_SCALER_AVAILABLE_FORMATS_BLOB: + msg = "BLOB"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_SCALER_AVAILABLE_JPEG_MIN_DURATIONS: { + break; + } + case ANDROID_SCALER_AVAILABLE_JPEG_SIZES: { + break; + } + case ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM: { + break; + } + case 
ANDROID_SCALER_AVAILABLE_PROCESSED_MIN_DURATIONS: { + break; + } + case ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES: { + break; + } + case ANDROID_SCALER_AVAILABLE_RAW_MIN_DURATIONS: { + break; + } + case ANDROID_SCALER_AVAILABLE_RAW_SIZES: { + break; + } + case ANDROID_SCALER_AVAILABLE_INPUT_OUTPUT_FORMATS_MAP: { + break; + } + case ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS: { + switch (value) { + case ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT: + msg = "OUTPUT"; + ret = 0; + break; + case ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_INPUT: + msg = "INPUT"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS: { + break; + } + case ANDROID_SCALER_AVAILABLE_STALL_DURATIONS: { + break; + } + case ANDROID_SCALER_CROPPING_TYPE: { + switch (value) { + case ANDROID_SCALER_CROPPING_TYPE_CENTER_ONLY: + msg = "CENTER_ONLY"; + ret = 0; + break; + case ANDROID_SCALER_CROPPING_TYPE_FREEFORM: + msg = "FREEFORM"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + + case ANDROID_SENSOR_EXPOSURE_TIME: { + break; + } + case ANDROID_SENSOR_FRAME_DURATION: { + break; + } + case ANDROID_SENSOR_SENSITIVITY: { + break; + } + case ANDROID_SENSOR_REFERENCE_ILLUMINANT1: { + switch (value) { + case ANDROID_SENSOR_REFERENCE_ILLUMINANT1_DAYLIGHT: + msg = "DAYLIGHT"; + ret = 0; + break; + case ANDROID_SENSOR_REFERENCE_ILLUMINANT1_FLUORESCENT: + msg = "FLUORESCENT"; + ret = 0; + break; + case ANDROID_SENSOR_REFERENCE_ILLUMINANT1_TUNGSTEN: + msg = "TUNGSTEN"; + ret = 0; + break; + case ANDROID_SENSOR_REFERENCE_ILLUMINANT1_FLASH: + msg = "FLASH"; + ret = 0; + break; + case ANDROID_SENSOR_REFERENCE_ILLUMINANT1_FINE_WEATHER: + msg = "FINE_WEATHER"; + ret = 0; + break; + case ANDROID_SENSOR_REFERENCE_ILLUMINANT1_CLOUDY_WEATHER: + msg = "CLOUDY_WEATHER"; + ret = 0; + break; + case ANDROID_SENSOR_REFERENCE_ILLUMINANT1_SHADE: + msg = "SHADE"; + ret = 0; + break; + case ANDROID_SENSOR_REFERENCE_ILLUMINANT1_DAYLIGHT_FLUORESCENT: + msg = "DAYLIGHT_FLUORESCENT"; + ret = 0; + break; + case ANDROID_SENSOR_REFERENCE_ILLUMINANT1_DAY_WHITE_FLUORESCENT: + msg = "DAY_WHITE_FLUORESCENT"; + ret = 0; + break; + case ANDROID_SENSOR_REFERENCE_ILLUMINANT1_COOL_WHITE_FLUORESCENT: + msg = "COOL_WHITE_FLUORESCENT"; + ret = 0; + break; + case ANDROID_SENSOR_REFERENCE_ILLUMINANT1_WHITE_FLUORESCENT: + msg = "WHITE_FLUORESCENT"; + ret = 0; + break; + case ANDROID_SENSOR_REFERENCE_ILLUMINANT1_STANDARD_A: + msg = "STANDARD_A"; + ret = 0; + break; + case ANDROID_SENSOR_REFERENCE_ILLUMINANT1_STANDARD_B: + msg = "STANDARD_B"; + ret = 0; + break; + case ANDROID_SENSOR_REFERENCE_ILLUMINANT1_STANDARD_C: + msg = "STANDARD_C"; + ret = 0; + break; + case ANDROID_SENSOR_REFERENCE_ILLUMINANT1_D55: + msg = "D55"; + ret = 0; + break; + case ANDROID_SENSOR_REFERENCE_ILLUMINANT1_D65: + msg = "D65"; + ret = 0; + break; + case ANDROID_SENSOR_REFERENCE_ILLUMINANT1_D75: + msg = "D75"; + ret = 0; + break; + case ANDROID_SENSOR_REFERENCE_ILLUMINANT1_D50: + msg = "D50"; + ret = 0; + break; + case ANDROID_SENSOR_REFERENCE_ILLUMINANT1_ISO_STUDIO_TUNGSTEN: + msg = "ISO_STUDIO_TUNGSTEN"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_SENSOR_REFERENCE_ILLUMINANT2: { + break; + } + case ANDROID_SENSOR_CALIBRATION_TRANSFORM1: { + break; + } + case ANDROID_SENSOR_CALIBRATION_TRANSFORM2: { + break; + } + case ANDROID_SENSOR_COLOR_TRANSFORM1: { + break; + } + case 
ANDROID_SENSOR_COLOR_TRANSFORM2: { + break; + } + case ANDROID_SENSOR_FORWARD_MATRIX1: { + break; + } + case ANDROID_SENSOR_FORWARD_MATRIX2: { + break; + } + case ANDROID_SENSOR_BASE_GAIN_FACTOR: { + break; + } + case ANDROID_SENSOR_BLACK_LEVEL_PATTERN: { + break; + } + case ANDROID_SENSOR_MAX_ANALOG_SENSITIVITY: { + break; + } + case ANDROID_SENSOR_ORIENTATION: { + break; + } + case ANDROID_SENSOR_PROFILE_HUE_SAT_MAP_DIMENSIONS: { + break; + } + case ANDROID_SENSOR_TIMESTAMP: { + break; + } + case ANDROID_SENSOR_TEMPERATURE: { + break; + } + case ANDROID_SENSOR_NEUTRAL_COLOR_POINT: { + break; + } + case ANDROID_SENSOR_NOISE_PROFILE: { + break; + } + case ANDROID_SENSOR_PROFILE_HUE_SAT_MAP: { + break; + } + case ANDROID_SENSOR_PROFILE_TONE_CURVE: { + break; + } + case ANDROID_SENSOR_GREEN_SPLIT: { + break; + } + case ANDROID_SENSOR_TEST_PATTERN_DATA: { + break; + } + case ANDROID_SENSOR_TEST_PATTERN_MODE: { + switch (value) { + case ANDROID_SENSOR_TEST_PATTERN_MODE_OFF: + msg = "OFF"; + ret = 0; + break; + case ANDROID_SENSOR_TEST_PATTERN_MODE_SOLID_COLOR: + msg = "SOLID_COLOR"; + ret = 0; + break; + case ANDROID_SENSOR_TEST_PATTERN_MODE_COLOR_BARS: + msg = "COLOR_BARS"; + ret = 0; + break; + case ANDROID_SENSOR_TEST_PATTERN_MODE_COLOR_BARS_FADE_TO_GRAY: + msg = "COLOR_BARS_FADE_TO_GRAY"; + ret = 0; + break; + case ANDROID_SENSOR_TEST_PATTERN_MODE_PN9: + msg = "PN9"; + ret = 0; + break; + case ANDROID_SENSOR_TEST_PATTERN_MODE_CUSTOM1: + msg = "CUSTOM1"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_SENSOR_AVAILABLE_TEST_PATTERN_MODES: { + break; + } + case ANDROID_SENSOR_ROLLING_SHUTTER_SKEW: { + break; + } + case ANDROID_SENSOR_OPTICAL_BLACK_REGIONS: { + break; + } + case ANDROID_SENSOR_DYNAMIC_BLACK_LEVEL: { + break; + } + case ANDROID_SENSOR_DYNAMIC_WHITE_LEVEL: { + break; + } + case ANDROID_SENSOR_OPAQUE_RAW_SIZE: { + break; + } + + case ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE: { + break; + } + case ANDROID_SENSOR_INFO_SENSITIVITY_RANGE: { + break; + } + case ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT: { + switch (value) { + case ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_RGGB: + msg = "RGGB"; + ret = 0; + break; + case ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_GRBG: + msg = "GRBG"; + ret = 0; + break; + case ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_GBRG: + msg = "GBRG"; + ret = 0; + break; + case ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_BGGR: + msg = "BGGR"; + ret = 0; + break; + case ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_RGB: + msg = "RGB"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_SENSOR_INFO_EXPOSURE_TIME_RANGE: { + break; + } + case ANDROID_SENSOR_INFO_MAX_FRAME_DURATION: { + break; + } + case ANDROID_SENSOR_INFO_PHYSICAL_SIZE: { + break; + } + case ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE: { + break; + } + case ANDROID_SENSOR_INFO_WHITE_LEVEL: { + break; + } + case ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE: { + switch (value) { + case ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE_UNKNOWN: + msg = "UNKNOWN"; + ret = 0; + break; + case ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE_REALTIME: + msg = "REALTIME"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_SENSOR_INFO_LENS_SHADING_APPLIED: { + switch (value) { + case ANDROID_SENSOR_INFO_LENS_SHADING_APPLIED_FALSE: + msg = "FALSE"; + ret = 0; + break; + case ANDROID_SENSOR_INFO_LENS_SHADING_APPLIED_TRUE: + msg = "TRUE"; + ret = 0; + break; + default: + msg = 
"error: enum value out of range"; + } + break; + } + case ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE: { + break; + } + + case ANDROID_SHADING_MODE: { + switch (value) { + case ANDROID_SHADING_MODE_OFF: + msg = "OFF"; + ret = 0; + break; + case ANDROID_SHADING_MODE_FAST: + msg = "FAST"; + ret = 0; + break; + case ANDROID_SHADING_MODE_HIGH_QUALITY: + msg = "HIGH_QUALITY"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_SHADING_STRENGTH: { + break; + } + case ANDROID_SHADING_AVAILABLE_MODES: { + break; + } + + case ANDROID_STATISTICS_FACE_DETECT_MODE: { + switch (value) { + case ANDROID_STATISTICS_FACE_DETECT_MODE_OFF: + msg = "OFF"; + ret = 0; + break; + case ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE: + msg = "SIMPLE"; + ret = 0; + break; + case ANDROID_STATISTICS_FACE_DETECT_MODE_FULL: + msg = "FULL"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_STATISTICS_HISTOGRAM_MODE: { + switch (value) { + case ANDROID_STATISTICS_HISTOGRAM_MODE_OFF: + msg = "OFF"; + ret = 0; + break; + case ANDROID_STATISTICS_HISTOGRAM_MODE_ON: + msg = "ON"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_STATISTICS_SHARPNESS_MAP_MODE: { + switch (value) { + case ANDROID_STATISTICS_SHARPNESS_MAP_MODE_OFF: + msg = "OFF"; + ret = 0; + break; + case ANDROID_STATISTICS_SHARPNESS_MAP_MODE_ON: + msg = "ON"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE: { + switch (value) { + case ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE_OFF: + msg = "OFF"; + ret = 0; + break; + case ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE_ON: + msg = "ON"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_STATISTICS_FACE_IDS: { + break; + } + case ANDROID_STATISTICS_FACE_LANDMARKS: { + break; + } + case ANDROID_STATISTICS_FACE_RECTANGLES: { + break; + } + case ANDROID_STATISTICS_FACE_SCORES: { + break; + } + case ANDROID_STATISTICS_HISTOGRAM: { + break; + } + case ANDROID_STATISTICS_SHARPNESS_MAP: { + break; + } + case ANDROID_STATISTICS_LENS_SHADING_CORRECTION_MAP: { + break; + } + case ANDROID_STATISTICS_LENS_SHADING_MAP: { + break; + } + case ANDROID_STATISTICS_PREDICTED_COLOR_GAINS: { + break; + } + case ANDROID_STATISTICS_PREDICTED_COLOR_TRANSFORM: { + break; + } + case ANDROID_STATISTICS_SCENE_FLICKER: { + switch (value) { + case ANDROID_STATISTICS_SCENE_FLICKER_NONE: + msg = "NONE"; + ret = 0; + break; + case ANDROID_STATISTICS_SCENE_FLICKER_50HZ: + msg = "50HZ"; + ret = 0; + break; + case ANDROID_STATISTICS_SCENE_FLICKER_60HZ: + msg = "60HZ"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_STATISTICS_HOT_PIXEL_MAP: { + break; + } + case ANDROID_STATISTICS_LENS_SHADING_MAP_MODE: { + switch (value) { + case ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF: + msg = "OFF"; + ret = 0; + break; + case ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_ON: + msg = "ON"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_STATISTICS_OIS_DATA_MODE: { + switch (value) { + case ANDROID_STATISTICS_OIS_DATA_MODE_OFF: + msg = "OFF"; + ret = 0; + break; + case ANDROID_STATISTICS_OIS_DATA_MODE_ON: + msg = "ON"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_STATISTICS_OIS_TIMESTAMPS: { + break; + 
} + case ANDROID_STATISTICS_OIS_X_SHIFTS: { + break; + } + case ANDROID_STATISTICS_OIS_Y_SHIFTS: { + break; + } + + case ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES: { + break; + } + case ANDROID_STATISTICS_INFO_HISTOGRAM_BUCKET_COUNT: { + break; + } + case ANDROID_STATISTICS_INFO_MAX_FACE_COUNT: { + break; + } + case ANDROID_STATISTICS_INFO_MAX_HISTOGRAM_COUNT: { + break; + } + case ANDROID_STATISTICS_INFO_MAX_SHARPNESS_MAP_VALUE: { + break; + } + case ANDROID_STATISTICS_INFO_SHARPNESS_MAP_SIZE: { + break; + } + case ANDROID_STATISTICS_INFO_AVAILABLE_HOT_PIXEL_MAP_MODES: { + break; + } + case ANDROID_STATISTICS_INFO_AVAILABLE_LENS_SHADING_MAP_MODES: { + break; + } + case ANDROID_STATISTICS_INFO_AVAILABLE_OIS_DATA_MODES: { + break; + } + + case ANDROID_TONEMAP_CURVE_BLUE: { + break; + } + case ANDROID_TONEMAP_CURVE_GREEN: { + break; + } + case ANDROID_TONEMAP_CURVE_RED: { + break; + } + case ANDROID_TONEMAP_MODE: { + switch (value) { + case ANDROID_TONEMAP_MODE_CONTRAST_CURVE: + msg = "CONTRAST_CURVE"; + ret = 0; + break; + case ANDROID_TONEMAP_MODE_FAST: + msg = "FAST"; + ret = 0; + break; + case ANDROID_TONEMAP_MODE_HIGH_QUALITY: + msg = "HIGH_QUALITY"; + ret = 0; + break; + case ANDROID_TONEMAP_MODE_GAMMA_VALUE: + msg = "GAMMA_VALUE"; + ret = 0; + break; + case ANDROID_TONEMAP_MODE_PRESET_CURVE: + msg = "PRESET_CURVE"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_TONEMAP_MAX_CURVE_POINTS: { + break; + } + case ANDROID_TONEMAP_AVAILABLE_TONE_MAP_MODES: { + break; + } + case ANDROID_TONEMAP_GAMMA: { + break; + } + case ANDROID_TONEMAP_PRESET_CURVE: { + switch (value) { + case ANDROID_TONEMAP_PRESET_CURVE_SRGB: + msg = "SRGB"; + ret = 0; + break; + case ANDROID_TONEMAP_PRESET_CURVE_REC709: + msg = "REC709"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + + case ANDROID_LED_TRANSMIT: { + switch (value) { + case ANDROID_LED_TRANSMIT_OFF: + msg = "OFF"; + ret = 0; + break; + case ANDROID_LED_TRANSMIT_ON: + msg = "ON"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_LED_AVAILABLE_LEDS: { + switch (value) { + case ANDROID_LED_AVAILABLE_LEDS_TRANSMIT: + msg = "TRANSMIT"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + + case ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL: { + switch (value) { + case ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED: + msg = "LIMITED"; + ret = 0; + break; + case ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_FULL: + msg = "FULL"; + ret = 0; + break; + case ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LEGACY: + msg = "LEGACY"; + ret = 0; + break; + case ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_3: + msg = "3"; + ret = 0; + break; + case ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_EXTERNAL: + msg = "EXTERNAL"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_INFO_VERSION: { + break; + } + + case ANDROID_BLACK_LEVEL_LOCK: { + switch (value) { + case ANDROID_BLACK_LEVEL_LOCK_OFF: + msg = "OFF"; + ret = 0; + break; + case ANDROID_BLACK_LEVEL_LOCK_ON: + msg = "ON"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + + case ANDROID_SYNC_FRAME_NUMBER: { + switch (value) { + case ANDROID_SYNC_FRAME_NUMBER_CONVERGING: + msg = "CONVERGING"; + ret = 0; + break; + case ANDROID_SYNC_FRAME_NUMBER_UNKNOWN: + msg = "UNKNOWN"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } 
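+	/* Note: PER_FRAME_CONTROL below indicates zero sync latency, i.e. controls sent with a request take effect on that request's own frame. */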
+ case ANDROID_SYNC_MAX_LATENCY: { + switch (value) { + case ANDROID_SYNC_MAX_LATENCY_PER_FRAME_CONTROL: + msg = "PER_FRAME_CONTROL"; + ret = 0; + break; + case ANDROID_SYNC_MAX_LATENCY_UNKNOWN: + msg = "UNKNOWN"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + + case ANDROID_REPROCESS_EFFECTIVE_EXPOSURE_FACTOR: { + break; + } + case ANDROID_REPROCESS_MAX_CAPTURE_STALL: { + break; + } + + case ANDROID_DEPTH_MAX_DEPTH_SAMPLES: { + break; + } + case ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS: { + switch (value) { + case ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS_OUTPUT: + msg = "OUTPUT"; + ret = 0; + break; + case ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS_INPUT: + msg = "INPUT"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_DEPTH_AVAILABLE_DEPTH_MIN_FRAME_DURATIONS: { + break; + } + case ANDROID_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS: { + break; + } + case ANDROID_DEPTH_DEPTH_IS_EXCLUSIVE: { + switch (value) { + case ANDROID_DEPTH_DEPTH_IS_EXCLUSIVE_FALSE: + msg = "FALSE"; + ret = 0; + break; + case ANDROID_DEPTH_DEPTH_IS_EXCLUSIVE_TRUE: + msg = "TRUE"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + + case ANDROID_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS: { + break; + } + case ANDROID_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE: { + switch (value) { + case ANDROID_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE_APPROXIMATE: + msg = "APPROXIMATE"; + ret = 0; + break; + case ANDROID_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE_CALIBRATED: + msg = "CALIBRATED"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + + case ANDROID_DISTORTION_CORRECTION_MODE: { + switch (value) { + case ANDROID_DISTORTION_CORRECTION_MODE_OFF: + msg = "OFF"; + ret = 0; + break; + case ANDROID_DISTORTION_CORRECTION_MODE_FAST: + msg = "FAST"; + ret = 0; + break; + case ANDROID_DISTORTION_CORRECTION_MODE_HIGH_QUALITY: + msg = "HIGH_QUALITY"; + ret = 0; + break; + default: + msg = "error: enum value out of range"; + } + break; + } + case ANDROID_DISTORTION_CORRECTION_AVAILABLE_MODES: { + break; + } + + } + + strncpy(dst, msg, size - 1); + dst[size - 1] = '\0'; + + return ret; +} + + +#define CAMERA_METADATA_ENUM_STRING_MAX_SIZE 29 diff --git a/spider-cam/libcamera/src/android/mm/cros_camera_buffer.cpp b/spider-cam/libcamera/src/android/mm/cros_camera_buffer.cpp new file mode 100644 index 0000000..e2a44a2 --- /dev/null +++ b/spider-cam/libcamera/src/android/mm/cros_camera_buffer.cpp @@ -0,0 +1,184 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2021, Google Inc. 
+ *
+ * Chromium OS buffer backend using CameraBufferManager
+ */
+
+#include "../camera_buffer.h"
+
+#include <libcamera/base/log.h>
+
+#include "cros-camera/camera_buffer_manager.h"
+
+using namespace libcamera;
+
+LOG_DECLARE_CATEGORY(HAL)
+
+class CameraBuffer::Private : public Extensible::Private
+{
+	LIBCAMERA_DECLARE_PUBLIC(CameraBuffer)
+
+public:
+	Private(CameraBuffer *cameraBuffer, buffer_handle_t camera3Buffer,
+		PixelFormat pixelFormat, const Size &size,
+		int flags);
+	~Private();
+
+	bool isValid() const { return registered_; }
+
+	unsigned int numPlanes() const;
+
+	Span<uint8_t> plane(unsigned int plane);
+
+	unsigned int stride(unsigned int plane) const;
+	unsigned int offset(unsigned int plane) const;
+	unsigned int size(unsigned int plane) const;
+
+	size_t jpegBufferSize(size_t maxJpegBufferSize) const;
+
+private:
+	void map();
+
+	cros::CameraBufferManager *bufferManager_;
+	buffer_handle_t handle_;
+	unsigned int numPlanes_;
+	bool mapped_;
+	bool registered_;
+	union {
+		void *addr;
+		android_ycbcr ycbcr;
+	} mem;
+};
+
+CameraBuffer::Private::Private([[maybe_unused]] CameraBuffer *cameraBuffer,
+			       buffer_handle_t camera3Buffer,
+			       [[maybe_unused]] PixelFormat pixelFormat,
+			       [[maybe_unused]] const Size &size,
+			       [[maybe_unused]] int flags)
+	: handle_(camera3Buffer), numPlanes_(0), mapped_(false),
+	  registered_(false)
+{
+	bufferManager_ = cros::CameraBufferManager::GetInstance();
+	if (!bufferManager_) {
+		LOG(HAL, Fatal)
+			<< "Failed to get cros CameraBufferManager instance";
+		return;
+	}
+
+	int ret = bufferManager_->Register(camera3Buffer);
+	if (ret) {
+		LOG(HAL, Error) << "Failed registering a buffer: " << ret;
+		return;
+	}
+
+	registered_ = true;
+	numPlanes_ = bufferManager_->GetNumPlanes(camera3Buffer);
+}
+
+CameraBuffer::Private::~Private()
+{
+	int ret;
+	if (mapped_) {
+		ret = bufferManager_->Unlock(handle_);
+		if (ret != 0)
+			LOG(HAL, Error) << "Failed to unlock buffer: "
+					<< strerror(-ret);
+	}
+
+	if (registered_) {
+		ret = bufferManager_->Deregister(handle_);
+		if (ret != 0)
+			LOG(HAL, Error) << "Failed to deregister buffer: "
+					<< strerror(-ret);
+	}
+}
+
+unsigned int CameraBuffer::Private::numPlanes() const
+{
+	return bufferManager_->GetNumPlanes(handle_);
+}
+
+Span<uint8_t> CameraBuffer::Private::plane(unsigned int plane)
+{
+	if (!mapped_)
+		map();
+	if (!mapped_)
+		return {};
+
+	void *addr;
+
+	switch (numPlanes()) {
+	case 1:
+		addr = mem.addr;
+		break;
+	default:
+		switch (plane) {
+		case 0:
+			addr = mem.ycbcr.y;
+			break;
+		case 1:
+			addr = mem.ycbcr.cb;
+			break;
+		case 2:
+			addr = mem.ycbcr.cr;
+			break;
+		}
+	}
+
+	return { static_cast<uint8_t *>(addr),
+		 bufferManager_->GetPlaneSize(handle_, plane) };
+}
+
+unsigned int CameraBuffer::Private::stride(unsigned int plane) const
+{
+	return cros::CameraBufferManager::GetPlaneStride(handle_, plane);
+}
+
+unsigned int CameraBuffer::Private::offset(unsigned int plane) const
+{
+	return cros::CameraBufferManager::GetPlaneOffset(handle_, plane);
+}
+
+unsigned int CameraBuffer::Private::size(unsigned int plane) const
+{
+	return cros::CameraBufferManager::GetPlaneSize(handle_, plane);
+}
+
+size_t CameraBuffer::Private::jpegBufferSize([[maybe_unused]] size_t maxJpegBufferSize) const
+{
+	return bufferManager_->GetPlaneSize(handle_, 0);
+}
+
+void CameraBuffer::Private::map()
+{
+	int ret;
+	switch (numPlanes_) {
+	case 1: {
+		ret = bufferManager_->Lock(handle_, 0, 0, 0, 0, 0, &mem.addr);
+		if (ret) {
+			LOG(HAL, Error) << "Single plane buffer mapping failed";
+			return;
+		}
+		break;
+	}
+	case 2:
+	case 3: {
+		ret =
bufferManager_->LockYCbCr(handle_, 0, 0, 0, 0, 0, + &mem.ycbcr); + if (ret) { + LOG(HAL, Error) << "YCbCr buffer mapping failed"; + return; + } + break; + } + default: + LOG(HAL, Error) << "Invalid number of planes: " << numPlanes_; + return; + } + + mapped_ = true; + return; +} + +PUBLIC_CAMERA_BUFFER_IMPLEMENTATION diff --git a/spider-cam/libcamera/src/android/mm/cros_frame_buffer_allocator.cpp b/spider-cam/libcamera/src/android/mm/cros_frame_buffer_allocator.cpp new file mode 100644 index 0000000..264c0d4 --- /dev/null +++ b/spider-cam/libcamera/src/android/mm/cros_frame_buffer_allocator.cpp @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2021, Google Inc. + * + * Allocate FrameBuffer for Chromium OS using CameraBufferManager + */ + +#include +#include + +#include +#include + +#include "libcamera/internal/framebuffer.h" + +#include "../camera_device.h" +#include "../frame_buffer_allocator.h" +#include "../hal_framebuffer.h" +#include "cros-camera/camera_buffer_manager.h" + +using namespace libcamera; + +LOG_DECLARE_CATEGORY(HAL) + +namespace { +class CrosFrameBufferData : public FrameBuffer::Private +{ + LIBCAMERA_DECLARE_PUBLIC(FrameBuffer) + +public: + CrosFrameBufferData(cros::ScopedBufferHandle scopedHandle, + const std::vector &planes) + : FrameBuffer::Private(planes), scopedHandle_(std::move(scopedHandle)) + { + } + +private: + cros::ScopedBufferHandle scopedHandle_; +}; +} /* namespace */ + +class PlatformFrameBufferAllocator::Private : public Extensible::Private +{ + LIBCAMERA_DECLARE_PUBLIC(PlatformFrameBufferAllocator) + +public: + Private([[maybe_unused]] CameraDevice *const cameraDevice) + { + } + + std::unique_ptr + allocate(int halPixelFormat, const libcamera::Size &size, uint32_t usage); +}; + +std::unique_ptr +PlatformFrameBufferAllocator::Private::allocate(int halPixelFormat, + const libcamera::Size &size, + uint32_t usage) +{ + cros::ScopedBufferHandle scopedHandle = + cros::CameraBufferManager::AllocateScopedBuffer( + size.width, size.height, halPixelFormat, usage); + if (!scopedHandle) { + LOG(HAL, Error) << "Failed to allocate buffer handle"; + return nullptr; + } + + buffer_handle_t handle = *scopedHandle; + SharedFD fd{ handle->data[0] }; + if (!fd.isValid()) { + LOG(HAL, Fatal) << "Invalid fd"; + return nullptr; + } + + /* This code assumes all the planes are located in the same buffer. */ + const size_t numPlanes = cros::CameraBufferManager::GetNumPlanes(handle); + std::vector planes(numPlanes); + for (auto [i, plane] : utils::enumerate(planes)) { + plane.fd = fd; + plane.offset = cros::CameraBufferManager::GetPlaneOffset(handle, i); + plane.length = cros::CameraBufferManager::GetPlaneSize(handle, i); + } + + return std::make_unique( + std::make_unique(std::move(scopedHandle), planes), handle); +} + +PUBLIC_FRAME_BUFFER_ALLOCATOR_IMPLEMENTATION diff --git a/spider-cam/libcamera/src/android/mm/generic_camera_buffer.cpp b/spider-cam/libcamera/src/android/mm/generic_camera_buffer.cpp new file mode 100644 index 0000000..0ffcb44 --- /dev/null +++ b/spider-cam/libcamera/src/android/mm/generic_camera_buffer.cpp @@ -0,0 +1,199 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2021, Google Inc. 
+ *
+ * Generic Android frame buffer backend
+ */
+
+#include "../camera_buffer.h"
+
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/formats.h"
+#include "libcamera/internal/mapped_framebuffer.h"
+
+using namespace libcamera;
+
+LOG_DECLARE_CATEGORY(HAL)
+
+class CameraBuffer::Private : public Extensible::Private,
+			      public MappedBuffer
+{
+	LIBCAMERA_DECLARE_PUBLIC(CameraBuffer)
+
+public:
+	Private(CameraBuffer *cameraBuffer, buffer_handle_t camera3Buffer,
+		PixelFormat pixelFormat, const Size &size, int flags);
+	~Private();
+
+	unsigned int numPlanes() const;
+
+	Span<uint8_t> plane(unsigned int plane);
+
+	unsigned int stride(unsigned int plane) const;
+	unsigned int offset(unsigned int plane) const;
+	unsigned int size(unsigned int plane) const;
+
+	size_t jpegBufferSize(size_t maxJpegBufferSize) const;
+
+private:
+	struct PlaneInfo {
+		unsigned int stride;
+		unsigned int offset;
+		unsigned int size;
+	};
+
+	void map();
+
+	int fd_;
+	int flags_;
+	off_t bufferLength_;
+	bool mapped_;
+	std::vector<PlaneInfo> planeInfo_;
+};
+
+CameraBuffer::Private::Private([[maybe_unused]] CameraBuffer *cameraBuffer,
+			       buffer_handle_t camera3Buffer,
+			       PixelFormat pixelFormat,
+			       const Size &size, int flags)
+	: fd_(-1), flags_(flags), bufferLength_(-1), mapped_(false)
+{
+	error_ = 0;
+
+	const auto &info = PixelFormatInfo::info(pixelFormat);
+	if (!info.isValid()) {
+		error_ = -EINVAL;
+		LOG(HAL, Error) << "Invalid pixel format: " << pixelFormat;
+		return;
+	}
+
+	/*
+	 * As Android doesn't offer an API to query buffer layouts, assume for
+	 * now that the buffer is backed by a single dmabuf, with planes being
+	 * stored contiguously.
+	 */
+	for (int i = 0; i < camera3Buffer->numFds; i++) {
+		if (camera3Buffer->data[i] == -1 || camera3Buffer->data[i] == fd_)
+			continue;
+
+		if (fd_ != -1) {
+			error_ = -EINVAL;
+			LOG(HAL, Error) << "Discontiguous planes are not supported";
+			return;
+		}
+
+		fd_ = camera3Buffer->data[i];
+	}
+
+	if (fd_ == -1) {
+		error_ = -EINVAL;
+		LOG(HAL, Error) << "No valid file descriptor";
+		return;
+	}
+
+	bufferLength_ = lseek(fd_, 0, SEEK_END);
+	if (bufferLength_ < 0) {
+		error_ = -errno;
+		LOG(HAL, Error) << "Failed to get buffer length";
+		return;
+	}
+
+	const unsigned int numPlanes = info.numPlanes();
+	planeInfo_.resize(numPlanes);
+
+	unsigned int offset = 0;
+	for (unsigned int i = 0; i < numPlanes; ++i) {
+		const unsigned int planeSize = info.planeSize(size, i);
+
+		planeInfo_[i].stride = info.stride(size.width, i, 1u);
+		planeInfo_[i].offset = offset;
+		planeInfo_[i].size = planeSize;
+
+		if (bufferLength_ < offset + planeSize) {
+			LOG(HAL, Error) << "Plane " << i << " is out of buffer:"
+					<< " plane offset=" << offset
+					<< ", plane size=" << planeSize
+					<< ", buffer length=" << bufferLength_;
+			return;
+		}
+
+		offset += planeSize;
+	}
+}
+
+CameraBuffer::Private::~Private()
+{
+}
+
+unsigned int CameraBuffer::Private::numPlanes() const
+{
+	return planeInfo_.size();
+}
+
+Span<uint8_t> CameraBuffer::Private::plane(unsigned int plane)
+{
+	if (!mapped_)
+		map();
+	if (!mapped_)
+		return {};
+
+	return planes_[plane];
+}
+
+unsigned int CameraBuffer::Private::stride(unsigned int plane) const
+{
+	if (plane >= planeInfo_.size())
+		return 0;
+
+	return planeInfo_[plane].stride;
+}
+
+unsigned int CameraBuffer::Private::offset(unsigned int plane) const
+{
+	if (plane >= planeInfo_.size())
+		return 0;
+
+	return planeInfo_[plane].offset;
+}
+
+unsigned int CameraBuffer::Private::size(unsigned int plane) const
+{
+	if (plane >= planeInfo_.size())
+		return 0;
+
+	return
planeInfo_[plane].size; +} + +size_t CameraBuffer::Private::jpegBufferSize(size_t maxJpegBufferSize) const +{ + ASSERT(bufferLength_ >= 0); + + return std::min(bufferLength_, maxJpegBufferSize); +} + +void CameraBuffer::Private::map() +{ + ASSERT(fd_ != -1); + ASSERT(bufferLength_ >= 0); + + void *address = mmap(nullptr, bufferLength_, flags_, MAP_SHARED, fd_, 0); + if (address == MAP_FAILED) { + error_ = -errno; + LOG(HAL, Error) << "Failed to mmap plane"; + return; + } + maps_.emplace_back(static_cast(address), bufferLength_); + + planes_.reserve(planeInfo_.size()); + for (const auto &info : planeInfo_) { + planes_.emplace_back( + static_cast(address) + info.offset, info.size); + } + + mapped_ = true; +} + +PUBLIC_CAMERA_BUFFER_IMPLEMENTATION diff --git a/spider-cam/libcamera/src/android/mm/generic_frame_buffer_allocator.cpp b/spider-cam/libcamera/src/android/mm/generic_frame_buffer_allocator.cpp new file mode 100644 index 0000000..79625a9 --- /dev/null +++ b/spider-cam/libcamera/src/android/mm/generic_frame_buffer_allocator.cpp @@ -0,0 +1,150 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2021, Google Inc. + * + * Allocate FrameBuffer using gralloc API + */ + +#include +#include +#include + +#include +#include + +#include "libcamera/internal/formats.h" +#include "libcamera/internal/framebuffer.h" + +#include +#include +#include + +#include "../camera_device.h" +#include "../frame_buffer_allocator.h" +#include "../hal_framebuffer.h" + +using namespace libcamera; + +LOG_DECLARE_CATEGORY(HAL) + +namespace { +class GenericFrameBufferData : public FrameBuffer::Private +{ + LIBCAMERA_DECLARE_PUBLIC(FrameBuffer) + +public: + GenericFrameBufferData(struct alloc_device_t *allocDevice, + buffer_handle_t handle, + const std::vector &planes) + : FrameBuffer::Private(planes), allocDevice_(allocDevice), + handle_(handle) + { + ASSERT(allocDevice_); + ASSERT(handle_); + } + + ~GenericFrameBufferData() override + { + /* + * allocDevice_ is used to destroy handle_. allocDevice_ is + * owned by PlatformFrameBufferAllocator::Private. + * GenericFrameBufferData must be destroyed before it is + * destroyed. + * + * \todo Consider managing alloc_device_t with std::shared_ptr + * if this is difficult to maintain. + * + * \todo Thread safety against alloc_device_t is not documented. + * Is it no problem to call alloc/free in parallel? 
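+	 * In other words, every GenericFrameBufferData instance must be
+	 * destroyed before the PlatformFrameBufferAllocator::Private that
+	 * owns allocDevice_.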
+ */ + allocDevice_->free(allocDevice_, handle_); + } + +private: + struct alloc_device_t *allocDevice_; + const buffer_handle_t handle_; +}; +} /* namespace */ + +class PlatformFrameBufferAllocator::Private : public Extensible::Private +{ + LIBCAMERA_DECLARE_PUBLIC(PlatformFrameBufferAllocator) + +public: + Private(CameraDevice *const cameraDevice) + : cameraDevice_(cameraDevice), + hardwareModule_(nullptr), + allocDevice_(nullptr) + { + hw_get_module(GRALLOC_HARDWARE_MODULE_ID, &hardwareModule_); + ASSERT(hardwareModule_); + } + + ~Private() override; + + std::unique_ptr + allocate(int halPixelFormat, const libcamera::Size &size, uint32_t usage); + +private: + const CameraDevice *const cameraDevice_; + const struct hw_module_t *hardwareModule_; + struct alloc_device_t *allocDevice_; +}; + +PlatformFrameBufferAllocator::Private::~Private() +{ + if (allocDevice_) + gralloc_close(allocDevice_); + dlclose(hardwareModule_->dso); +} + +std::unique_ptr +PlatformFrameBufferAllocator::Private::allocate(int halPixelFormat, + const libcamera::Size &size, + uint32_t usage) +{ + if (!allocDevice_) { + int ret = gralloc_open(hardwareModule_, &allocDevice_); + if (ret) { + LOG(HAL, Fatal) << "gralloc_open() failed: " << ret; + return nullptr; + } + } + + int stride = 0; + buffer_handle_t handle = nullptr; + int ret = allocDevice_->alloc(allocDevice_, size.width, size.height, + halPixelFormat, usage, &handle, &stride); + if (ret) { + LOG(HAL, Error) << "failed buffer allocation: " << ret; + return nullptr; + } + if (!handle) { + LOG(HAL, Fatal) << "invalid buffer_handle_t"; + return nullptr; + } + + /* This code assumes the planes are mapped consecutively. */ + const libcamera::PixelFormat pixelFormat = + cameraDevice_->capabilities()->toPixelFormat(halPixelFormat); + const auto &info = PixelFormatInfo::info(pixelFormat); + std::vector planes(info.numPlanes()); + + SharedFD fd{ handle->data[0] }; + size_t offset = 0; + for (auto [i, plane] : utils::enumerate(planes)) { + const size_t planeSize = info.planeSize(size.height, i, stride); + + plane.fd = fd; + plane.offset = offset; + plane.length = planeSize; + offset += planeSize; + } + + return std::make_unique( + std::make_unique( + allocDevice_, handle, planes), + handle); +} + +PUBLIC_FRAME_BUFFER_ALLOCATOR_IMPLEMENTATION diff --git a/spider-cam/libcamera/src/android/mm/libhardware_stub.c b/spider-cam/libcamera/src/android/mm/libhardware_stub.c new file mode 100644 index 0000000..28faa63 --- /dev/null +++ b/spider-cam/libcamera/src/android/mm/libhardware_stub.c @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: Apache-2.0 */ +/* + * Copyright (C) 2023, Ideas on Board + * + * Android libhardware stub for test compilation + */ + +#include + +#include + +int hw_get_module(const char *id __attribute__((__unused__)), + const struct hw_module_t **module) +{ + *module = NULL; + return -ENOTSUP; +} diff --git a/spider-cam/libcamera/src/android/mm/meson.build b/spider-cam/libcamera/src/android/mm/meson.build new file mode 100644 index 0000000..e3e0484 --- /dev/null +++ b/spider-cam/libcamera/src/android/mm/meson.build @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: CC0-1.0 + +platform = get_option('android_platform') +if platform == 'generic' + android_hal_sources += files(['generic_camera_buffer.cpp', + 'generic_frame_buffer_allocator.cpp']) + android_deps += [libdl] + + libhardware = dependency('libhardware', required : false) + if libhardware.found() + android_deps += [libhardware] + else + android_hal_sources += files(['libhardware_stub.c']) + endif +elif platform 
== 'cros' + android_hal_sources += files(['cros_camera_buffer.cpp', + 'cros_frame_buffer_allocator.cpp']) + android_deps += [dependency('libcros_camera')] +endif diff --git a/spider-cam/libcamera/src/android/post_processor.h b/spider-cam/libcamera/src/android/post_processor.h new file mode 100644 index 0000000..b504a37 --- /dev/null +++ b/spider-cam/libcamera/src/android/post_processor.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Google Inc. + * + * CameraStream Post Processing Interface + */ + +#pragma once + +#include + +#include +#include + +#include "camera_buffer.h" +#include "camera_request.h" + +class PostProcessor +{ +public: + enum class Status { + Error, + Success + }; + + virtual ~PostProcessor() = default; + + virtual int configure(const libcamera::StreamConfiguration &inCfg, + const libcamera::StreamConfiguration &outCfg) = 0; + virtual void process(Camera3RequestDescriptor::StreamBuffer *streamBuffer) = 0; + + libcamera::Signal processComplete; +}; diff --git a/spider-cam/libcamera/src/android/yuv/post_processor_yuv.cpp b/spider-cam/libcamera/src/android/yuv/post_processor_yuv.cpp new file mode 100644 index 0000000..c998807 --- /dev/null +++ b/spider-cam/libcamera/src/android/yuv/post_processor_yuv.cpp @@ -0,0 +1,146 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2021, Google Inc. + * + * Post Processor using libyuv + */ + +#include "post_processor_yuv.h" + +#include + +#include + +#include +#include +#include + +#include "libcamera/internal/formats.h" +#include "libcamera/internal/mapped_framebuffer.h" + +using namespace libcamera; + +LOG_DEFINE_CATEGORY(YUV) + +int PostProcessorYuv::configure(const StreamConfiguration &inCfg, + const StreamConfiguration &outCfg) +{ + if (inCfg.pixelFormat != outCfg.pixelFormat) { + LOG(YUV, Error) << "Pixel format conversion is not supported" + << " (from " << inCfg.pixelFormat + << " to " << outCfg.pixelFormat << ")"; + return -EINVAL; + } + + if (inCfg.size < outCfg.size) { + LOG(YUV, Error) << "Up-scaling is not supported" + << " (from " << inCfg.size + << " to " << outCfg.size << ")"; + return -EINVAL; + } + + if (inCfg.pixelFormat != formats::NV12) { + LOG(YUV, Error) << "Unsupported format " << inCfg.pixelFormat + << " (only NV12 is supported)"; + return -EINVAL; + } + + calculateLengths(inCfg, outCfg); + return 0; +} + +void PostProcessorYuv::process(Camera3RequestDescriptor::StreamBuffer *streamBuffer) +{ + const FrameBuffer &source = *streamBuffer->srcBuffer; + CameraBuffer *destination = streamBuffer->dstBuffer.get(); + + if (!isValidBuffers(source, *destination)) { + processComplete.emit(streamBuffer, PostProcessor::Status::Error); + return; + } + + const MappedFrameBuffer sourceMapped(&source, MappedFrameBuffer::MapFlag::Read); + if (!sourceMapped.isValid()) { + LOG(YUV, Error) << "Failed to mmap camera frame buffer"; + processComplete.emit(streamBuffer, PostProcessor::Status::Error); + return; + } + + int ret = libyuv::NV12Scale(sourceMapped.planes()[0].data(), + sourceStride_[0], + sourceMapped.planes()[1].data(), + sourceStride_[1], + sourceSize_.width, sourceSize_.height, + destination->plane(0).data(), + destinationStride_[0], + destination->plane(1).data(), + destinationStride_[1], + destinationSize_.width, + destinationSize_.height, + libyuv::FilterMode::kFilterBilinear); + if (ret) { + LOG(YUV, Error) << "Failed NV12 scaling: " << ret; + processComplete.emit(streamBuffer, PostProcessor::Status::Error); + return; + } + + 
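+	/* Scaling succeeded: hand the processed buffer back through the completion signal. */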
processComplete.emit(streamBuffer, PostProcessor::Status::Success); +} + +bool PostProcessorYuv::isValidBuffers(const FrameBuffer &source, + const CameraBuffer &destination) const +{ + if (source.planes().size() != 2) { + LOG(YUV, Error) << "Invalid number of source planes: " + << source.planes().size(); + return false; + } + if (destination.numPlanes() != 2) { + LOG(YUV, Error) << "Invalid number of destination planes: " + << destination.numPlanes(); + return false; + } + + if (source.planes()[0].length < sourceLength_[0] || + source.planes()[1].length < sourceLength_[1]) { + LOG(YUV, Error) + << "The source planes lengths are too small, actual size: {" + << source.planes()[0].length << ", " + << source.planes()[1].length + << "}, expected size: {" + << sourceLength_[0] << ", " + << sourceLength_[1] << "}"; + return false; + } + if (destination.plane(0).size() < destinationLength_[0] || + destination.plane(1).size() < destinationLength_[1]) { + LOG(YUV, Error) + << "The destination planes lengths are too small, actual size: {" + << destination.plane(0).size() << ", " + << destination.plane(1).size() + << "}, expected size: {" + << sourceLength_[0] << ", " + << sourceLength_[1] << "}"; + return false; + } + + return true; +} + +void PostProcessorYuv::calculateLengths(const StreamConfiguration &inCfg, + const StreamConfiguration &outCfg) +{ + sourceSize_ = inCfg.size; + destinationSize_ = outCfg.size; + + const PixelFormatInfo &nv12Info = PixelFormatInfo::info(formats::NV12); + for (unsigned int i = 0; i < 2; i++) { + sourceStride_[i] = inCfg.stride; + destinationStride_[i] = nv12Info.stride(destinationSize_.width, i, 1); + + sourceLength_[i] = nv12Info.planeSize(sourceSize_.height, i, + sourceStride_[i]); + destinationLength_[i] = nv12Info.planeSize(destinationSize_.height, i, + destinationStride_[i]); + } +} diff --git a/spider-cam/libcamera/src/android/yuv/post_processor_yuv.h b/spider-cam/libcamera/src/android/yuv/post_processor_yuv.h new file mode 100644 index 0000000..ed7bb1f --- /dev/null +++ b/spider-cam/libcamera/src/android/yuv/post_processor_yuv.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2021, Google Inc. + * + * Post Processor using libyuv + */ + +#pragma once + +#include "../post_processor.h" + +#include + +class PostProcessorYuv : public PostProcessor +{ +public: + PostProcessorYuv() = default; + + int configure(const libcamera::StreamConfiguration &incfg, + const libcamera::StreamConfiguration &outcfg) override; + void process(Camera3RequestDescriptor::StreamBuffer *streamBuffer) override; + +private: + bool isValidBuffers(const libcamera::FrameBuffer &source, + const CameraBuffer &destination) const; + void calculateLengths(const libcamera::StreamConfiguration &inCfg, + const libcamera::StreamConfiguration &outCfg); + + libcamera::Size sourceSize_; + libcamera::Size destinationSize_; + unsigned int sourceLength_[2] = {}; + unsigned int destinationLength_[2] = {}; + unsigned int sourceStride_[2] = {}; + unsigned int destinationStride_[2] = {}; +}; diff --git a/spider-cam/libcamera/src/apps/cam/camera_session.cpp b/spider-cam/libcamera/src/apps/cam/camera_session.cpp new file mode 100644 index 0000000..097dc47 --- /dev/null +++ b/spider-cam/libcamera/src/apps/cam/camera_session.cpp @@ -0,0 +1,474 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2019, Google Inc. 
+ * + * Camera capture session + */ + +#include +#include +#include +#include + +#include +#include + +#include "../common/event_loop.h" +#include "../common/stream_options.h" + +#include "camera_session.h" +#include "capture_script.h" +#include "file_sink.h" +#ifdef HAVE_KMS +#include "kms_sink.h" +#endif +#include "main.h" +#ifdef HAVE_SDL +#include "sdl_sink.h" +#endif + +using namespace libcamera; + +CameraSession::CameraSession(CameraManager *cm, + const std::string &cameraId, + unsigned int cameraIndex, + const OptionsParser::Options &options) + : options_(options), cameraIndex_(cameraIndex), last_(0), + queueCount_(0), captureCount_(0), captureLimit_(0), + printMetadata_(false) +{ + char *endptr; + unsigned long index = strtoul(cameraId.c_str(), &endptr, 10); + + if (*endptr == '\0' && index > 0) { + auto cameras = cm->cameras(); + if (index <= cameras.size()) + camera_ = cameras[index - 1]; + } + + if (!camera_) + camera_ = cm->get(cameraId); + + if (!camera_) { + std::cerr << "Camera " << cameraId << " not found" << std::endl; + return; + } + + if (camera_->acquire()) { + std::cerr << "Failed to acquire camera " << cameraId + << std::endl; + return; + } + + std::vector roles = StreamKeyValueParser::roles(options_[OptStream]); + + std::unique_ptr config = + camera_->generateConfiguration(roles); + if (!config || config->size() != roles.size()) { + std::cerr << "Failed to get default stream configuration" + << std::endl; + return; + } + + if (options_.isSet(OptOrientation)) { + std::string orientOpt = options_[OptOrientation].toString(); + static const std::map orientations{ + { "rot0", libcamera::Orientation::Rotate0 }, + { "rot180", libcamera::Orientation::Rotate180 }, + { "mirror", libcamera::Orientation::Rotate0Mirror }, + { "flip", libcamera::Orientation::Rotate180Mirror }, + }; + + auto orientation = orientations.find(orientOpt); + if (orientation == orientations.end()) { + std::cerr << "Invalid orientation " << orientOpt << std::endl; + return; + } + + config->orientation = orientation->second; + } + + /* Apply configuration if explicitly requested. 
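+	 * Stream parameters supplied on the command line override the
+	 * defaults generated above.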
*/ + if (StreamKeyValueParser::updateConfiguration(config.get(), + options_[OptStream])) { + std::cerr << "Failed to update configuration" << std::endl; + return; + } + + bool strictFormats = options_.isSet(OptStrictFormats); + +#ifdef HAVE_KMS + if (options_.isSet(OptDisplay)) { + if (options_.isSet(OptFile)) { + std::cerr << "--display and --file options are mutually exclusive" + << std::endl; + return; + } + + if (roles.size() != 1) { + std::cerr << "Display doesn't support multiple streams" + << std::endl; + return; + } + + if (roles[0] != StreamRole::Viewfinder) { + std::cerr << "Display requires a viewfinder stream" + << std::endl; + return; + } + } +#endif + + if (options_.isSet(OptCaptureScript)) { + std::string scriptName = options_[OptCaptureScript].toString(); + script_ = std::make_unique(camera_, scriptName); + if (!script_->valid()) { + std::cerr << "Invalid capture script '" << scriptName + << "'" << std::endl; + return; + } + } + + switch (config->validate()) { + case CameraConfiguration::Valid: + break; + + case CameraConfiguration::Adjusted: + if (strictFormats) { + std::cout << "Adjusting camera configuration disallowed by --strict-formats argument" + << std::endl; + return; + } + std::cout << "Camera configuration adjusted" << std::endl; + break; + + case CameraConfiguration::Invalid: + std::cout << "Camera configuration invalid" << std::endl; + return; + } + + config_ = std::move(config); +} + +CameraSession::~CameraSession() +{ + if (camera_) + camera_->release(); +} + +void CameraSession::listControls() const +{ + for (const auto &[id, info] : camera_->controls()) { + std::cout << "Control: " << id->name() << ": " + << info.toString() << std::endl; + } +} + +void CameraSession::listProperties() const +{ + for (const auto &[key, value] : camera_->properties()) { + const ControlId *id = properties::properties.at(key); + + std::cout << "Property: " << id->name() << " = " + << value.toString() << std::endl; + } +} + +void CameraSession::infoConfiguration() const +{ + unsigned int index = 0; + for (const StreamConfiguration &cfg : *config_) { + std::cout << index << ": " << cfg.toString() << std::endl; + + const StreamFormats &formats = cfg.formats(); + for (PixelFormat pixelformat : formats.pixelformats()) { + std::cout << " * Pixelformat: " + << pixelformat << " " + << formats.range(pixelformat).toString() + << std::endl; + + for (const Size &size : formats.sizes(pixelformat)) + std::cout << " - " << size << std::endl; + } + + index++; + } +} + +int CameraSession::start() +{ + int ret; + + queueCount_ = 0; + captureCount_ = 0; + captureLimit_ = options_[OptCapture].toInteger(); + printMetadata_ = options_.isSet(OptMetadata); + + ret = camera_->configure(config_.get()); + if (ret < 0) { + std::cout << "Failed to configure camera" << std::endl; + return ret; + } + + streamNames_.clear(); + for (unsigned int index = 0; index < config_->size(); ++index) { + StreamConfiguration &cfg = config_->at(index); + streamNames_[cfg.stream()] = "cam" + std::to_string(cameraIndex_) + + "-stream" + std::to_string(index); + } + + camera_->requestCompleted.connect(this, &CameraSession::requestComplete); + +#ifdef HAVE_KMS + if (options_.isSet(OptDisplay)) + sink_ = std::make_unique(options_[OptDisplay].toString()); +#endif + +#ifdef HAVE_SDL + if (options_.isSet(OptSDL)) + sink_ = std::make_unique(); +#endif + + if (options_.isSet(OptFile)) { + if (!options_[OptFile].toString().empty()) + sink_ = std::make_unique(camera_.get(), streamNames_, + options_[OptFile]); + else + sink_ = 
std::make_unique(camera_.get(), streamNames_); + } + + if (sink_) { + ret = sink_->configure(*config_); + if (ret < 0) { + std::cout << "Failed to configure frame sink" + << std::endl; + return ret; + } + + sink_->requestProcessed.connect(this, &CameraSession::sinkRelease); + } + + allocator_ = std::make_unique(camera_); + + return startCapture(); +} + +void CameraSession::stop() +{ + int ret = camera_->stop(); + if (ret) + std::cout << "Failed to stop capture" << std::endl; + + if (sink_) { + ret = sink_->stop(); + if (ret) + std::cout << "Failed to stop frame sink" << std::endl; + } + + sink_.reset(); + + requests_.clear(); + + allocator_.reset(); +} + +int CameraSession::startCapture() +{ + int ret; + + /* Identify the stream with the least number of buffers. */ + unsigned int nbuffers = UINT_MAX; + for (StreamConfiguration &cfg : *config_) { + ret = allocator_->allocate(cfg.stream()); + if (ret < 0) { + std::cerr << "Can't allocate buffers" << std::endl; + return -ENOMEM; + } + + unsigned int allocated = allocator_->buffers(cfg.stream()).size(); + nbuffers = std::min(nbuffers, allocated); + } + + /* + * TODO: make cam tool smarter to support still capture by for + * example pushing a button. For now run all streams all the time. + */ + + for (unsigned int i = 0; i < nbuffers; i++) { + std::unique_ptr request = camera_->createRequest(); + if (!request) { + std::cerr << "Can't create request" << std::endl; + return -ENOMEM; + } + + for (StreamConfiguration &cfg : *config_) { + Stream *stream = cfg.stream(); + const std::vector> &buffers = + allocator_->buffers(stream); + const std::unique_ptr &buffer = buffers[i]; + + ret = request->addBuffer(stream, buffer.get()); + if (ret < 0) { + std::cerr << "Can't set buffer for request" + << std::endl; + return ret; + } + + if (sink_) + sink_->mapBuffer(buffer.get()); + } + + requests_.push_back(std::move(request)); + } + + if (sink_) { + ret = sink_->start(); + if (ret) { + std::cout << "Failed to start frame sink" << std::endl; + return ret; + } + } + + ret = camera_->start(); + if (ret) { + std::cout << "Failed to start capture" << std::endl; + if (sink_) + sink_->stop(); + return ret; + } + + for (std::unique_ptr &request : requests_) { + ret = queueRequest(request.get()); + if (ret < 0) { + std::cerr << "Can't queue request" << std::endl; + camera_->stop(); + if (sink_) + sink_->stop(); + return ret; + } + } + + if (captureLimit_) + std::cout << "cam" << cameraIndex_ + << ": Capture " << captureLimit_ << " frames" + << std::endl; + else + std::cout << "cam" << cameraIndex_ + << ": Capture until user interrupts by SIGINT" + << std::endl; + + return 0; +} + +int CameraSession::queueRequest(Request *request) +{ + if (captureLimit_ && queueCount_ >= captureLimit_) + return 0; + + if (script_) + request->controls() = script_->frameControls(queueCount_); + + queueCount_++; + + return camera_->queueRequest(request); +} + +void CameraSession::requestComplete(Request *request) +{ + if (request->status() == Request::RequestCancelled) + return; + + /* + * Defer processing of the completed request to the event loop, to avoid + * blocking the camera manager thread. + */ + EventLoop::instance()->callLater([this, request]() { processRequest(request); }); +} + +void CameraSession::processRequest(Request *request) +{ + /* + * If we've reached the capture limit, we're done. 
This doesn't + * duplicate the check below that emits the captureDone signal, as this + * function will be called for each request still in flight after the + * capture limit is reached and we don't want to emit the signal every + * single time. + */ + if (captureLimit_ && captureCount_ >= captureLimit_) + return; + + const Request::BufferMap &buffers = request->buffers(); + + /* + * Compute the frame rate. The timestamp is arbitrarily retrieved from + * the first buffer, as all buffers should have matching timestamps. + */ + uint64_t ts = buffers.begin()->second->metadata().timestamp; + double fps = ts - last_; + fps = last_ != 0 && fps ? 1000000000.0 / fps : 0.0; + last_ = ts; + + bool requeue = true; + + std::stringstream info; + info << ts / 1000000000 << "." + << std::setw(6) << std::setfill('0') << ts / 1000 % 1000000 + << " (" << std::fixed << std::setprecision(2) << fps << " fps)"; + + for (const auto &[stream, buffer] : buffers) { + const FrameMetadata &metadata = buffer->metadata(); + + info << " " << streamNames_[stream] + << " seq: " << std::setw(6) << std::setfill('0') << metadata.sequence + << " bytesused: "; + + unsigned int nplane = 0; + for (const FrameMetadata::Plane &plane : metadata.planes()) { + info << plane.bytesused; + if (++nplane < metadata.planes().size()) + info << "/"; + } + } + + if (sink_) { + if (!sink_->processRequest(request)) + requeue = false; + } + + std::cout << info.str() << std::endl; + + if (printMetadata_) { + const ControlList &requestMetadata = request->metadata(); + for (const auto &[key, value] : requestMetadata) { + const ControlId *id = controls::controls.at(key); + std::cout << "\t" << id->name() << " = " + << value.toString() << std::endl; + } + } + + /* + * Notify the user that capture is complete if the limit has just been + * reached. + */ + captureCount_++; + if (captureLimit_ && captureCount_ >= captureLimit_) { + captureDone.emit(); + return; + } + + /* + * If the frame sink holds on the request, we'll requeue it later in the + * complete handler. + */ + if (!requeue) + return; + + request->reuse(Request::ReuseBuffers); + queueRequest(request); +} + +void CameraSession::sinkRelease(Request *request) +{ + request->reuse(Request::ReuseBuffers); + queueRequest(request); +} diff --git a/spider-cam/libcamera/src/apps/cam/camera_session.h b/spider-cam/libcamera/src/apps/cam/camera_session.h new file mode 100644 index 0000000..4442fd9 --- /dev/null +++ b/spider-cam/libcamera/src/apps/cam/camera_session.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2019, Google Inc. 
+ *
+ * Camera capture session
+ */
+
+#pragma once
+
+#include <memory>
+#include <stdint.h>
+#include <string>
+#include <vector>
+
+#include <libcamera/base/signal.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/camera_manager.h>
+#include <libcamera/framebuffer.h>
+#include <libcamera/framebuffer_allocator.h>
+#include <libcamera/request.h>
+#include <libcamera/stream.h>
+
+#include "../common/options.h"
+
+class CaptureScript;
+class FrameSink;
+
+class CameraSession
+{
+public:
+	CameraSession(libcamera::CameraManager *cm,
+		      const std::string &cameraId, unsigned int cameraIndex,
+		      const OptionsParser::Options &options);
+	~CameraSession();
+
+	bool isValid() const { return config_ != nullptr; }
+	const OptionsParser::Options &options() { return options_; }
+
+	libcamera::Camera *camera() { return camera_.get(); }
+	libcamera::CameraConfiguration *config() { return config_.get(); }
+
+	void listControls() const;
+	void listProperties() const;
+	void infoConfiguration() const;
+
+	int start();
+	void stop();
+
+	libcamera::Signal<> captureDone;
+
+private:
+	int startCapture();
+
+	int queueRequest(libcamera::Request *request);
+	void requestComplete(libcamera::Request *request);
+	void processRequest(libcamera::Request *request);
+	void sinkRelease(libcamera::Request *request);
+
+	const OptionsParser::Options &options_;
+	std::shared_ptr<libcamera::Camera> camera_;
+	std::unique_ptr<libcamera::CameraConfiguration> config_;
+
+	std::unique_ptr<CaptureScript> script_;
+
+	std::map<const libcamera::Stream *, std::string> streamNames_;
+	std::unique_ptr<FrameSink> sink_;
+	unsigned int cameraIndex_;
+
+	uint64_t last_;
+
+	unsigned int queueCount_;
+	unsigned int captureCount_;
+	unsigned int captureLimit_;
+	bool printMetadata_;
+
+	std::unique_ptr<libcamera::FrameBufferAllocator> allocator_;
+	std::vector<std::unique_ptr<libcamera::Request>> requests_;
+};
diff --git a/spider-cam/libcamera/src/apps/cam/capture-script.yaml b/spider-cam/libcamera/src/apps/cam/capture-script.yaml
new file mode 100644
index 0000000..7118865
--- /dev/null
+++ b/spider-cam/libcamera/src/apps/cam/capture-script.yaml
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: CC0-1.0
+
+# Capture script example
+#
+# A capture script associates a list of controls and their values with
+# frame numbers.
+#
+# The script defines a list of frames with associated controls, and an
+# optional list of properties that control the script behaviour.
+
+# properties:
+#   # Repeat the controls every 'idx' frames.
+#   - loop: idx
+#
+# # List of frame numbers, each with an associated list of controls to apply
+# frames:
+#   - frame-number:
+#       Control1: value1
+#       Control2: value2
+
+# \todo Formally define the capture script structure with a schema
+
+# Notes:
+# - Controls have to be specified by name, as defined in the
+#   libcamera::controls:: enumeration
+# - Controls not supported by the camera currently in use are ignored
+# - Frame numbers shall be monotonically increasing; gaps are allowed
+# - If a loop limit is specified, frame numbers in the 'frames' list shall be
+#   less than the loop limit
+
+# Example: Turn brightness up and down every 460 frames
+
+properties:
+  - loop: 460
+
+frames:
+  - 0:
+      Brightness: 0.0
+
+  - 40:
+      Brightness: 0.2
+
+  - 80:
+      Brightness: 0.4
+
+  - 120:
+      Brightness: 0.8
+
+  - 160:
+      Brightness: 0.4
+
+  - 200:
+      Brightness: 0.2
+
+  - 240:
+      Brightness: 0.0
+
+  - 280:
+      Brightness: -0.2
+
+  - 300:
+      Brightness: -0.4
+
+  - 340:
+      Brightness: -0.8
+
+  - 380:
+      Brightness: -0.4
+
+  - 420:
+      Brightness: -0.2
diff --git a/spider-cam/libcamera/src/apps/cam/capture_script.cpp b/spider-cam/libcamera/src/apps/cam/capture_script.cpp
new file mode 100644
index 0000000..fc1dfa7
--- /dev/null
+++ b/spider-cam/libcamera/src/apps/cam/capture_script.cpp
@@ -0,0 +1,662 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Ideas on Board Oy
+ *
+ * Capture session configuration script
+ */
+
+#include "capture_script.h"
+
+#include <iostream>
+#include <stdio.h>
+#include <stdlib.h>
+
+using namespace libcamera;
+
+CaptureScript::CaptureScript(std::shared_ptr<Camera> camera,
+			     const std::string &fileName)
+	: camera_(camera), loop_(0), valid_(false)
+{
+	FILE *fh = fopen(fileName.c_str(), "r");
+	if (!fh) {
+		int ret = -errno;
+		std::cerr << "Failed to open capture script " << fileName
+			  << ": " << strerror(-ret) << std::endl;
+		return;
+	}
+
+	/*
+	 * Map the camera's controls to their name so that they can be
+	 * easily identified when parsing the script file.
+	 */
+	for (const auto &[control, info] : camera_->controls())
+		controls_[control->name()] = control;
+
+	int ret = parseScript(fh);
+	fclose(fh);
+	if (ret)
+		return;
+
+	valid_ = true;
+}
+
+/* Retrieve the control list associated with a frame number. */
+const ControlList &CaptureScript::frameControls(unsigned int frame)
+{
+	static ControlList controls{};
+	unsigned int idx = frame;
+
+	/* If we loop, repeat the controls every 'loop_' frames.
*/ + if (loop_) + idx = frame % loop_; + + auto it = frameControls_.find(idx); + if (it == frameControls_.end()) + return controls; + + return it->second; +} + +CaptureScript::EventPtr CaptureScript::nextEvent(yaml_event_type_t expectedType) +{ + EventPtr event(new yaml_event_t); + + if (!yaml_parser_parse(&parser_, event.get())) + return nullptr; + + if (expectedType != YAML_NO_EVENT && !checkEvent(event, expectedType)) + return nullptr; + + return event; +} + +bool CaptureScript::checkEvent(const EventPtr &event, yaml_event_type_t expectedType) const +{ + if (event->type != expectedType) { + std::cerr << "Capture script error on line " << event->start_mark.line + << " column " << event->start_mark.column << ": " + << "Expected " << eventTypeName(expectedType) + << " event, got " << eventTypeName(event->type) + << std::endl; + return false; + } + + return true; +} + +std::string CaptureScript::eventScalarValue(const EventPtr &event) +{ + return std::string(reinterpret_cast(event->data.scalar.value), + event->data.scalar.length); +} + +std::string CaptureScript::eventTypeName(yaml_event_type_t type) +{ + static const std::map typeNames = { + { YAML_STREAM_START_EVENT, "stream-start" }, + { YAML_STREAM_END_EVENT, "stream-end" }, + { YAML_DOCUMENT_START_EVENT, "document-start" }, + { YAML_DOCUMENT_END_EVENT, "document-end" }, + { YAML_ALIAS_EVENT, "alias" }, + { YAML_SCALAR_EVENT, "scalar" }, + { YAML_SEQUENCE_START_EVENT, "sequence-start" }, + { YAML_SEQUENCE_END_EVENT, "sequence-end" }, + { YAML_MAPPING_START_EVENT, "mapping-start" }, + { YAML_MAPPING_END_EVENT, "mapping-end" }, + }; + + auto it = typeNames.find(type); + if (it == typeNames.end()) + return "[type " + std::to_string(type) + "]"; + + return it->second; +} + +int CaptureScript::parseScript(FILE *script) +{ + int ret = yaml_parser_initialize(&parser_); + if (!ret) { + std::cerr << "Failed to initialize yaml parser" << std::endl; + return ret; + } + + /* Delete the parser upon function exit. 
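+	 * (ParserDeleter below is a small RAII guard: every early return
+	 * out of parseScript() then frees the libyaml parser state
+	 * automatically.)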
*/ + struct ParserDeleter { + ParserDeleter(yaml_parser_t *parser) : parser_(parser) { } + ~ParserDeleter() { yaml_parser_delete(parser_); } + yaml_parser_t *parser_; + } deleter(&parser_); + + yaml_parser_set_input_file(&parser_, script); + + EventPtr event = nextEvent(YAML_STREAM_START_EVENT); + if (!event) + return -EINVAL; + + event = nextEvent(YAML_DOCUMENT_START_EVENT); + if (!event) + return -EINVAL; + + event = nextEvent(YAML_MAPPING_START_EVENT); + if (!event) + return -EINVAL; + + while (1) { + event = nextEvent(); + if (!event) + return -EINVAL; + + if (event->type == YAML_MAPPING_END_EVENT) + return 0; + + if (!checkEvent(event, YAML_SCALAR_EVENT)) + return -EINVAL; + + std::string section = eventScalarValue(event); + + if (section == "properties") { + ret = parseProperties(); + if (ret) + return ret; + } else if (section == "frames") { + ret = parseFrames(); + if (ret) + return ret; + } else { + std::cerr << "Unsupported section '" << section << "'" + << std::endl; + return -EINVAL; + } + } +} + +int CaptureScript::parseProperty() +{ + EventPtr event = nextEvent(YAML_MAPPING_START_EVENT); + if (!event) + return -EINVAL; + + std::string prop = parseScalar(); + if (prop.empty()) + return -EINVAL; + + if (prop == "loop") { + event = nextEvent(); + if (!event) + return -EINVAL; + + std::string value = eventScalarValue(event); + if (value.empty()) + return -EINVAL; + + loop_ = atoi(value.c_str()); + if (!loop_) { + std::cerr << "Invalid loop limit '" << loop_ << "'" + << std::endl; + return -EINVAL; + } + } else { + std::cerr << "Unsupported property '" << prop << "'" << std::endl; + return -EINVAL; + } + + event = nextEvent(YAML_MAPPING_END_EVENT); + if (!event) + return -EINVAL; + + return 0; +} + +int CaptureScript::parseProperties() +{ + EventPtr event = nextEvent(YAML_SEQUENCE_START_EVENT); + if (!event) + return -EINVAL; + + while (1) { + if (event->type == YAML_SEQUENCE_END_EVENT) + return 0; + + int ret = parseProperty(); + if (ret) + return ret; + + event = nextEvent(); + if (!event) + return -EINVAL; + } + + return 0; +} + +int CaptureScript::parseFrames() +{ + EventPtr event = nextEvent(YAML_SEQUENCE_START_EVENT); + if (!event) + return -EINVAL; + + while (1) { + event = nextEvent(); + if (!event) + return -EINVAL; + + if (event->type == YAML_SEQUENCE_END_EVENT) + return 0; + + int ret = parseFrame(std::move(event)); + if (ret) + return ret; + } +} + +int CaptureScript::parseFrame(EventPtr event) +{ + if (!checkEvent(event, YAML_MAPPING_START_EVENT)) + return -EINVAL; + + std::string key = parseScalar(); + if (key.empty()) + return -EINVAL; + + unsigned int frameId = atoi(key.c_str()); + if (loop_ && frameId >= loop_) { + std::cerr + << "Frame id (" << frameId << ") shall be smaller than" + << "loop limit (" << loop_ << ")" << std::endl; + return -EINVAL; + } + + event = nextEvent(YAML_MAPPING_START_EVENT); + if (!event) + return -EINVAL; + + ControlList controls{}; + + while (1) { + event = nextEvent(); + if (!event) + return -EINVAL; + + if (event->type == YAML_MAPPING_END_EVENT) + break; + + int ret = parseControl(std::move(event), controls); + if (ret) + return ret; + } + + frameControls_[frameId] = std::move(controls); + + event = nextEvent(YAML_MAPPING_END_EVENT); + if (!event) + return -EINVAL; + + return 0; +} + +int CaptureScript::parseControl(EventPtr event, ControlList &controls) +{ + /* We expect a value after a key. 
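+	 * For a mapping entry such as "Brightness: 0.5" the event received
+	 * here holds the key ("Brightness"); unpackControl() below then
+	 * consumes the value event(s).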
*/ + std::string name = eventScalarValue(event); + if (name.empty()) + return -EINVAL; + + /* If the camera does not support the control just ignore it. */ + auto it = controls_.find(name); + if (it == controls_.end()) { + std::cerr << "Unsupported control '" << name << "'" << std::endl; + return -EINVAL; + } + + const ControlId *controlId = it->second; + + ControlValue val = unpackControl(controlId); + if (val.isNone()) { + std::cerr << "Error unpacking control '" << name << "'" + << std::endl; + return -EINVAL; + } + + controls.set(controlId->id(), val); + + return 0; +} + +std::string CaptureScript::parseScalar() +{ + EventPtr event = nextEvent(YAML_SCALAR_EVENT); + if (!event) + return ""; + + return eventScalarValue(event); +} + +ControlValue CaptureScript::parseRectangles() +{ + std::vector rectangles; + + std::vector> arrays = parseArrays(); + if (arrays.empty()) + return {}; + + for (const std::vector &values : arrays) { + if (values.size() != 4) { + std::cerr << "Error parsing Rectangle: expected " + << "array with 4 parameters" << std::endl; + return {}; + } + + Rectangle rect = unpackRectangle(values); + rectangles.push_back(rect); + } + + ControlValue controlValue; + if (rectangles.size() == 1) + controlValue.set(rectangles.at(0)); + else + controlValue.set(Span(rectangles)); + + return controlValue; +} + +std::vector> CaptureScript::parseArrays() +{ + EventPtr event = nextEvent(YAML_SEQUENCE_START_EVENT); + if (!event) + return {}; + + event = nextEvent(); + if (!event) + return {}; + + std::vector> valueArrays; + + /* Parse single array. */ + if (event->type == YAML_SCALAR_EVENT) { + std::string firstValue = eventScalarValue(event); + if (firstValue.empty()) + return {}; + + std::vector remaining = parseSingleArray(); + + std::vector values = { firstValue }; + values.insert(std::end(values), + std::begin(remaining), std::end(remaining)); + valueArrays.push_back(values); + + return valueArrays; + } + + /* Parse array of arrays. 
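+	 * e.g. "AfWindows: [ [ 0, 0, 320, 240 ], [ 320, 240, 320, 240 ] ]"
+	 * yields two four-element string arrays, later unpacked into
+	 * Rectangles.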
*/ + while (1) { + switch (event->type) { + case YAML_SEQUENCE_START_EVENT: { + std::vector values = parseSingleArray(); + valueArrays.push_back(values); + break; + } + case YAML_SEQUENCE_END_EVENT: + return valueArrays; + default: + return {}; + } + + event = nextEvent(); + if (!event) + return {}; + } +} + +std::vector CaptureScript::parseSingleArray() +{ + std::vector values; + + while (1) { + EventPtr event = nextEvent(); + if (!event) + return {}; + + switch (event->type) { + case YAML_SCALAR_EVENT: { + std::string value = eventScalarValue(event); + if (value.empty()) + return {}; + values.push_back(value); + break; + } + case YAML_SEQUENCE_END_EVENT: + return values; + default: + return {}; + } + } +} + +void CaptureScript::unpackFailure(const ControlId *id, const std::string &repr) +{ + static const std::map typeNames = { + { ControlTypeNone, "none" }, + { ControlTypeBool, "bool" }, + { ControlTypeByte, "byte" }, + { ControlTypeInteger32, "int32" }, + { ControlTypeInteger64, "int64" }, + { ControlTypeFloat, "float" }, + { ControlTypeString, "string" }, + { ControlTypeRectangle, "Rectangle" }, + { ControlTypeSize, "Size" }, + }; + + const char *typeName; + auto it = typeNames.find(id->type()); + if (it != typeNames.end()) + typeName = it->second; + else + typeName = "unknown"; + + std::cerr << "Unsupported control '" << repr << "' for " + << typeName << " control " << id->name() << std::endl; +} + +ControlValue CaptureScript::parseScalarControl(const ControlId *id, + const std::string repr) +{ + ControlValue value{}; + + switch (id->type()) { + case ControlTypeNone: + break; + case ControlTypeBool: { + bool val; + + if (repr == "true") { + val = true; + } else if (repr == "false") { + val = false; + } else { + unpackFailure(id, repr); + return value; + } + + value.set(val); + break; + } + case ControlTypeByte: { + uint8_t val = strtol(repr.c_str(), NULL, 10); + value.set(val); + break; + } + case ControlTypeInteger32: { + int32_t val = strtol(repr.c_str(), NULL, 10); + value.set(val); + break; + } + case ControlTypeInteger64: { + int64_t val = strtoll(repr.c_str(), NULL, 10); + value.set(val); + break; + } + case ControlTypeFloat: { + float val = strtof(repr.c_str(), NULL); + value.set(val); + break; + } + case ControlTypeString: { + value.set(repr); + break; + } + default: + std::cerr << "Unsupported control type" << std::endl; + break; + } + + return value; +} + +ControlValue CaptureScript::parseArrayControl(const ControlId *id, + const std::vector &repr) +{ + ControlValue value{}; + + switch (id->type()) { + case ControlTypeNone: + break; + case ControlTypeBool: { + /* + * This is unpleasant, but we cannot use an std::vector<> as its + * boolean type overload does not allow to access the raw data, + * as boolean values are stored in a bitmask for efficiency. + * + * As we need a contiguous memory region to wrap in a Span<>, + * use an array instead but be strict about not overflowing it + * by limiting the number of controls we can store. + * + * Be loud but do not fail, as the issue would present at + * runtime and it's not fatal. 
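+	 * (kMaxNumBooleanControls is sized far beyond any boolean array
+	 * control a camera is expected to expose.)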
+ */ + static constexpr unsigned int kMaxNumBooleanControls = 1024; + std::array values; + unsigned int idx = 0; + + for (const std::string &s : repr) { + bool val; + + if (s == "true") { + val = true; + } else if (s == "false") { + val = false; + } else { + unpackFailure(id, s); + return value; + } + + if (idx == kMaxNumBooleanControls) { + std::cerr << "Cannot parse more than " + << kMaxNumBooleanControls + << " boolean controls" << std::endl; + break; + } + + values[idx++] = val; + } + + value = Span(values.data(), idx); + break; + } + case ControlTypeByte: { + std::vector values; + for (const std::string &s : repr) { + uint8_t val = strtoll(s.c_str(), NULL, 10); + values.push_back(val); + } + + value = Span(values.data(), values.size()); + break; + } + case ControlTypeInteger32: { + std::vector values; + for (const std::string &s : repr) { + int32_t val = strtoll(s.c_str(), NULL, 10); + values.push_back(val); + } + + value = Span(values.data(), values.size()); + break; + } + case ControlTypeInteger64: { + std::vector values; + for (const std::string &s : repr) { + int64_t val = strtoll(s.c_str(), NULL, 10); + values.push_back(val); + } + + value = Span(values.data(), values.size()); + break; + } + case ControlTypeFloat: { + std::vector values; + for (const std::string &s : repr) + values.push_back(strtof(s.c_str(), NULL)); + + value = Span(values.data(), values.size()); + break; + } + case ControlTypeString: { + value = Span(repr.data(), repr.size()); + break; + } + default: + std::cerr << "Unsupported control type" << std::endl; + break; + } + + return value; +} + +ControlValue CaptureScript::unpackControl(const ControlId *id) +{ + /* Parse complex types. */ + switch (id->type()) { + case ControlTypeRectangle: + return parseRectangles(); + case ControlTypeSize: + /* \todo Parse Sizes. */ + return {}; + default: + break; + } + + /* Check if the control has a single scalar value or is an array. 
*/ + EventPtr event = nextEvent(); + if (!event) + return {}; + + switch (event->type) { + case YAML_SCALAR_EVENT: { + const std::string repr = eventScalarValue(event); + if (repr.empty()) + return {}; + + return parseScalarControl(id, repr); + } + case YAML_SEQUENCE_START_EVENT: { + std::vector array = parseSingleArray(); + if (array.empty()) + return {}; + + return parseArrayControl(id, array); + } + default: + std::cerr << "Unexpected event type: " << event->type << std::endl; + return {}; + } +} + +libcamera::Rectangle CaptureScript::unpackRectangle(const std::vector &strVec) +{ + int x = strtol(strVec[0].c_str(), NULL, 10); + int y = strtol(strVec[1].c_str(), NULL, 10); + unsigned int width = strtoul(strVec[2].c_str(), NULL, 10); + unsigned int height = strtoul(strVec[3].c_str(), NULL, 10); + + return Rectangle(x, y, width, height); +} diff --git a/spider-cam/libcamera/src/apps/cam/capture_script.h b/spider-cam/libcamera/src/apps/cam/capture_script.h new file mode 100644 index 0000000..294b920 --- /dev/null +++ b/spider-cam/libcamera/src/apps/cam/capture_script.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2022, Ideas on Board Oy + * + * Capture session configuration script + */ + +#pragma once + +#include +#include +#include + +#include +#include + +#include + +class CaptureScript +{ +public: + CaptureScript(std::shared_ptr camera, + const std::string &fileName); + + bool valid() const { return valid_; } + + const libcamera::ControlList &frameControls(unsigned int frame); + +private: + struct EventDeleter { + void operator()(yaml_event_t *event) const + { + yaml_event_delete(event); + delete event; + } + }; + using EventPtr = std::unique_ptr; + + std::map controls_; + std::map frameControls_; + std::shared_ptr camera_; + yaml_parser_t parser_; + unsigned int loop_; + bool valid_; + + EventPtr nextEvent(yaml_event_type_t expectedType = YAML_NO_EVENT); + bool checkEvent(const EventPtr &event, yaml_event_type_t expectedType) const; + static std::string eventScalarValue(const EventPtr &event); + static std::string eventTypeName(yaml_event_type_t type); + + int parseScript(FILE *script); + + int parseProperties(); + int parseProperty(); + int parseFrames(); + int parseFrame(EventPtr event); + int parseControl(EventPtr event, libcamera::ControlList &controls); + + libcamera::ControlValue parseScalarControl(const libcamera::ControlId *id, + const std::string repr); + libcamera::ControlValue parseArrayControl(const libcamera::ControlId *id, + const std::vector &repr); + + std::string parseScalar(); + libcamera::ControlValue parseRectangles(); + std::vector> parseArrays(); + std::vector parseSingleArray(); + + void unpackFailure(const libcamera::ControlId *id, + const std::string &repr); + libcamera::ControlValue unpackControl(const libcamera::ControlId *id); + libcamera::Rectangle unpackRectangle(const std::vector &strVec); +}; diff --git a/spider-cam/libcamera/src/apps/cam/drm.cpp b/spider-cam/libcamera/src/apps/cam/drm.cpp new file mode 100644 index 0000000..47bbb6b --- /dev/null +++ b/spider-cam/libcamera/src/apps/cam/drm.cpp @@ -0,0 +1,717 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2021, Ideas on Board Oy + * + * DRM/KMS Helpers + */ + +#include "drm.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include "../common/event_loop.h" + +namespace DRM { + +Object::Object(Device *dev, uint32_t id, Type type) + : 
id_(id), dev_(dev), type_(type) +{ + /* Retrieve properties from the objects that support them. */ + if (type != TypeConnector && type != TypeCrtc && + type != TypeEncoder && type != TypePlane) + return; + + /* + * We can't distinguish between failures due to the object having no + * property and failures due to other conditions. Assume we use the API + * correctly and consider the object has no property. + */ + drmModeObjectProperties *properties = drmModeObjectGetProperties(dev->fd(), id, type); + if (!properties) + return; + + properties_.reserve(properties->count_props); + for (uint32_t i = 0; i < properties->count_props; ++i) + properties_.emplace_back(properties->props[i], + properties->prop_values[i]); + + drmModeFreeObjectProperties(properties); +} + +Object::~Object() +{ +} + +const Property *Object::property(const std::string &name) const +{ + for (const PropertyValue &pv : properties_) { + const Property *property = static_cast(dev_->object(pv.id())); + if (property && property->name() == name) + return property; + } + + return nullptr; +} + +const PropertyValue *Object::propertyValue(const std::string &name) const +{ + for (const PropertyValue &pv : properties_) { + const Property *property = static_cast(dev_->object(pv.id())); + if (property && property->name() == name) + return &pv; + } + + return nullptr; +} + +Property::Property(Device *dev, drmModePropertyRes *property) + : Object(dev, property->prop_id, TypeProperty), + name_(property->name), flags_(property->flags), + values_(property->values, property->values + property->count_values), + blobs_(property->blob_ids, property->blob_ids + property->count_blobs) +{ + if (drm_property_type_is(property, DRM_MODE_PROP_RANGE)) + type_ = TypeRange; + else if (drm_property_type_is(property, DRM_MODE_PROP_ENUM)) + type_ = TypeEnum; + else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) + type_ = TypeBlob; + else if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) + type_ = TypeBitmask; + else if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) + type_ = TypeObject; + else if (drm_property_type_is(property, DRM_MODE_PROP_SIGNED_RANGE)) + type_ = TypeSignedRange; + else + type_ = TypeUnknown; + + for (int i = 0; i < property->count_enums; ++i) + enums_[property->enums[i].value] = property->enums[i].name; +} + +Blob::Blob(Device *dev, const libcamera::Span &data) + : Object(dev, 0, Object::TypeBlob) +{ + drmModeCreatePropertyBlob(dev->fd(), data.data(), data.size(), &id_); +} + +Blob::~Blob() +{ + if (isValid()) + drmModeDestroyPropertyBlob(device()->fd(), id()); +} + +Mode::Mode(const drmModeModeInfo &mode) + : drmModeModeInfo(mode) +{ +} + +std::unique_ptr Mode::toBlob(Device *dev) const +{ + libcamera::Span data{ reinterpret_cast(this), + sizeof(*this) }; + return std::make_unique(dev, data); +} + +Crtc::Crtc(Device *dev, const drmModeCrtc *crtc, unsigned int index) + : Object(dev, crtc->crtc_id, Object::TypeCrtc), index_(index) +{ +} + +Encoder::Encoder(Device *dev, const drmModeEncoder *encoder) + : Object(dev, encoder->encoder_id, Object::TypeEncoder), + type_(encoder->encoder_type) +{ + const std::list &crtcs = dev->crtcs(); + possibleCrtcs_.reserve(crtcs.size()); + + for (const Crtc &crtc : crtcs) { + if (encoder->possible_crtcs & (1 << crtc.index())) + possibleCrtcs_.push_back(&crtc); + } + + possibleCrtcs_.shrink_to_fit(); +} + +namespace { + +const std::map connectorTypeNames{ + { DRM_MODE_CONNECTOR_Unknown, "Unknown" }, + { DRM_MODE_CONNECTOR_VGA, "VGA" }, + { DRM_MODE_CONNECTOR_DVII, "DVI-I" }, + { 
DRM_MODE_CONNECTOR_DVID, "DVI-D" }, + { DRM_MODE_CONNECTOR_DVIA, "DVI-A" }, + { DRM_MODE_CONNECTOR_Composite, "Composite" }, + { DRM_MODE_CONNECTOR_SVIDEO, "S-Video" }, + { DRM_MODE_CONNECTOR_LVDS, "LVDS" }, + { DRM_MODE_CONNECTOR_Component, "Component" }, + { DRM_MODE_CONNECTOR_9PinDIN, "9-Pin-DIN" }, + { DRM_MODE_CONNECTOR_DisplayPort, "DP" }, + { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A" }, + { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B" }, + { DRM_MODE_CONNECTOR_TV, "TV" }, + { DRM_MODE_CONNECTOR_eDP, "eDP" }, + { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" }, + { DRM_MODE_CONNECTOR_DSI, "DSI" }, + { DRM_MODE_CONNECTOR_DPI, "DPI" }, +}; + +} /* namespace */ + +Connector::Connector(Device *dev, const drmModeConnector *connector) + : Object(dev, connector->connector_id, Object::TypeConnector), + type_(connector->connector_type) +{ + auto typeName = connectorTypeNames.find(connector->connector_type); + if (typeName == connectorTypeNames.end()) { + std::cerr + << "Invalid connector type " + << connector->connector_type << std::endl; + typeName = connectorTypeNames.find(DRM_MODE_CONNECTOR_Unknown); + } + + name_ = std::string(typeName->second) + "-" + + std::to_string(connector->connector_type_id); + + switch (connector->connection) { + case DRM_MODE_CONNECTED: + status_ = Status::Connected; + break; + + case DRM_MODE_DISCONNECTED: + status_ = Status::Disconnected; + break; + + case DRM_MODE_UNKNOWNCONNECTION: + default: + status_ = Status::Unknown; + break; + } + + const std::list &encoders = dev->encoders(); + + encoders_.reserve(connector->count_encoders); + + for (int i = 0; i < connector->count_encoders; ++i) { + uint32_t encoderId = connector->encoders[i]; + auto encoder = std::find_if(encoders.begin(), encoders.end(), + [=](const Encoder &e) { + return e.id() == encoderId; + }); + if (encoder == encoders.end()) { + std::cerr + << "Encoder " << encoderId << " not found" + << std::endl; + continue; + } + + encoders_.push_back(&*encoder); + } + + encoders_.shrink_to_fit(); + + modes_ = { connector->modes, connector->modes + connector->count_modes }; +} + +Plane::Plane(Device *dev, const drmModePlane *plane) + : Object(dev, plane->plane_id, Object::TypePlane), + possibleCrtcsMask_(plane->possible_crtcs) +{ + formats_ = { plane->formats, plane->formats + plane->count_formats }; + + const std::list &crtcs = dev->crtcs(); + possibleCrtcs_.reserve(crtcs.size()); + + for (const Crtc &crtc : crtcs) { + if (plane->possible_crtcs & (1 << crtc.index())) + possibleCrtcs_.push_back(&crtc); + } + + possibleCrtcs_.shrink_to_fit(); +} + +bool Plane::supportsFormat(const libcamera::PixelFormat &format) const +{ + return std::find(formats_.begin(), formats_.end(), format.fourcc()) + != formats_.end(); +} + +int Plane::setup() +{ + const PropertyValue *pv = propertyValue("type"); + if (!pv) + return -EINVAL; + + switch (pv->value()) { + case DRM_PLANE_TYPE_OVERLAY: + type_ = TypeOverlay; + break; + + case DRM_PLANE_TYPE_PRIMARY: + type_ = TypePrimary; + break; + + case DRM_PLANE_TYPE_CURSOR: + type_ = TypeCursor; + break; + + default: + return -EINVAL; + } + + return 0; +} + +FrameBuffer::FrameBuffer(Device *dev) + : Object(dev, 0, Object::TypeFb) +{ +} + +FrameBuffer::~FrameBuffer() +{ + for (const auto &plane : planes_) { + struct drm_gem_close gem_close = { + .handle = plane.second.handle, + .pad = 0, + }; + int ret; + + do { + ret = ioctl(device()->fd(), DRM_IOCTL_GEM_CLOSE, &gem_close); + } while (ret == -1 && (errno == EINTR || errno == EAGAIN)); + + if (ret == -1) { + ret = -errno; + std::cerr + << "Failed to close GEM 
object: " + << strerror(-ret) << std::endl; + } + } + + drmModeRmFB(device()->fd(), id()); +} + +AtomicRequest::AtomicRequest(Device *dev) + : dev_(dev), valid_(true) +{ + request_ = drmModeAtomicAlloc(); + if (!request_) + valid_ = false; +} + +AtomicRequest::~AtomicRequest() +{ + if (request_) + drmModeAtomicFree(request_); +} + +int AtomicRequest::addProperty(const Object *object, const std::string &property, + uint64_t value) +{ + if (!valid_) + return -EINVAL; + + const Property *prop = object->property(property); + if (!prop) { + valid_ = false; + return -EINVAL; + } + + return addProperty(object->id(), prop->id(), value); +} + +int AtomicRequest::addProperty(const Object *object, const std::string &property, + std::unique_ptr blob) +{ + if (!valid_) + return -EINVAL; + + const Property *prop = object->property(property); + if (!prop) { + valid_ = false; + return -EINVAL; + } + + int ret = addProperty(object->id(), prop->id(), blob->id()); + if (ret < 0) + return ret; + + blobs_.emplace_back(std::move(blob)); + + return 0; +} + +int AtomicRequest::addProperty(uint32_t object, uint32_t property, uint64_t value) +{ + int ret = drmModeAtomicAddProperty(request_, object, property, value); + if (ret < 0) { + valid_ = false; + return ret; + } + + return 0; +} + +int AtomicRequest::commit(unsigned int flags) +{ + if (!valid_) + return -EINVAL; + + uint32_t drmFlags = 0; + if (flags & FlagAllowModeset) + drmFlags |= DRM_MODE_ATOMIC_ALLOW_MODESET; + if (flags & FlagAsync) + drmFlags |= DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_ATOMIC_NONBLOCK; + if (flags & FlagTestOnly) + drmFlags |= DRM_MODE_ATOMIC_TEST_ONLY; + + return drmModeAtomicCommit(dev_->fd(), request_, drmFlags, this); +} + +Device::Device() + : fd_(-1) +{ +} + +Device::~Device() +{ + if (fd_ != -1) + drmClose(fd_); +} + +int Device::init() +{ + int ret = openCard(); + if (ret < 0) { + std::cerr << "Failed to open any DRM/KMS device: " + << strerror(-ret) << std::endl; + return ret; + } + + /* + * Enable the atomic APIs. This also automatically enables the + * universal planes API. + */ + ret = drmSetClientCap(fd_, DRM_CLIENT_CAP_ATOMIC, 1); + if (ret < 0) { + ret = -errno; + std::cerr + << "Failed to enable atomic capability: " + << strerror(-ret) << std::endl; + return ret; + } + + /* List all the resources. */ + ret = getResources(); + if (ret < 0) + return ret; + + EventLoop::instance()->addFdEvent(fd_, EventLoop::Read, + std::bind(&Device::drmEvent, this)); + + return 0; +} + +int Device::openCard() +{ + const std::string dirName = "/dev/dri/"; + bool found = false; + int ret; + + /* + * Open the first DRM/KMS device beginning with /dev/dri/card. The + * libdrm drmOpen*() functions require either a module name or a bus ID, + * which we don't have, so bypass them. The automatic module loading and + * device node creation from drmOpen() is of no practical use as any + * modern system will handle that through udev or an equivalent + * component. 
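+	 * In practice the loop below opens the first /dev/dri/card* node
+	 * whose driver supports dumb buffers; on a Raspberry Pi that is
+	 * typically the display controller rather than a render-only GPU
+	 * node, though the exact node is hardware-dependent.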
+ */ + DIR *folder = opendir(dirName.c_str()); + if (!folder) { + ret = -errno; + std::cerr << "Failed to open " << dirName + << " directory: " << strerror(-ret) << std::endl; + return ret; + } + + for (struct dirent *res; (res = readdir(folder));) { + uint64_t cap; + + if (strncmp(res->d_name, "card", 4)) + continue; + + const std::string devName = dirName + res->d_name; + fd_ = open(devName.c_str(), O_RDWR | O_CLOEXEC); + if (fd_ < 0) { + ret = -errno; + std::cerr << "Failed to open DRM/KMS device " << devName << ": " + << strerror(-ret) << std::endl; + continue; + } + + /* + * Skip devices that don't support the modeset API, to avoid + * selecting a DRM device corresponding to a GPU. There is no + * modeset capability, but the kernel returns an error for most + * caps if mode setting isn't support by the driver. The + * DRM_CAP_DUMB_BUFFER capability is one of those, other would + * do as well. The capability value itself isn't relevant. + */ + ret = drmGetCap(fd_, DRM_CAP_DUMB_BUFFER, &cap); + if (ret < 0) { + drmClose(fd_); + fd_ = -1; + continue; + } + + found = true; + break; + } + + closedir(folder); + + return found ? 0 : -ENOENT; +} + +int Device::getResources() +{ + int ret; + + std::unique_ptr resources{ + drmModeGetResources(fd_), + &drmModeFreeResources + }; + if (!resources) { + ret = -errno; + std::cerr + << "Failed to get DRM/KMS resources: " + << strerror(-ret) << std::endl; + return ret; + } + + for (int i = 0; i < resources->count_crtcs; ++i) { + drmModeCrtc *crtc = drmModeGetCrtc(fd_, resources->crtcs[i]); + if (!crtc) { + ret = -errno; + std::cerr + << "Failed to get CRTC: " << strerror(-ret) + << std::endl; + return ret; + } + + crtcs_.emplace_back(this, crtc, i); + drmModeFreeCrtc(crtc); + + Crtc &obj = crtcs_.back(); + objects_[obj.id()] = &obj; + } + + for (int i = 0; i < resources->count_encoders; ++i) { + drmModeEncoder *encoder = + drmModeGetEncoder(fd_, resources->encoders[i]); + if (!encoder) { + ret = -errno; + std::cerr + << "Failed to get encoder: " << strerror(-ret) + << std::endl; + return ret; + } + + encoders_.emplace_back(this, encoder); + drmModeFreeEncoder(encoder); + + Encoder &obj = encoders_.back(); + objects_[obj.id()] = &obj; + } + + for (int i = 0; i < resources->count_connectors; ++i) { + drmModeConnector *connector = + drmModeGetConnector(fd_, resources->connectors[i]); + if (!connector) { + ret = -errno; + std::cerr + << "Failed to get connector: " << strerror(-ret) + << std::endl; + return ret; + } + + connectors_.emplace_back(this, connector); + drmModeFreeConnector(connector); + + Connector &obj = connectors_.back(); + objects_[obj.id()] = &obj; + } + + std::unique_ptr planes{ + drmModeGetPlaneResources(fd_), + &drmModeFreePlaneResources + }; + if (!planes) { + ret = -errno; + std::cerr + << "Failed to get DRM/KMS planes: " + << strerror(-ret) << std::endl; + return ret; + } + + for (uint32_t i = 0; i < planes->count_planes; ++i) { + drmModePlane *plane = + drmModeGetPlane(fd_, planes->planes[i]); + if (!plane) { + ret = -errno; + std::cerr + << "Failed to get plane: " << strerror(-ret) + << std::endl; + return ret; + } + + planes_.emplace_back(this, plane); + drmModeFreePlane(plane); + + Plane &obj = planes_.back(); + objects_[obj.id()] = &obj; + } + + /* Set the possible planes for each CRTC. */ + for (Crtc &crtc : crtcs_) { + for (const Plane &plane : planes_) { + if (plane.possibleCrtcsMask_ & (1 << crtc.index())) + crtc.planes_.push_back(&plane); + } + } + + /* Collect all property IDs and create Property instances. 
*/ + std::set properties; + for (const auto &object : objects_) { + for (const PropertyValue &value : object.second->properties()) + properties.insert(value.id()); + } + + for (uint32_t id : properties) { + drmModePropertyRes *property = drmModeGetProperty(fd_, id); + if (!property) { + ret = -errno; + std::cerr + << "Failed to get property: " << strerror(-ret) + << std::endl; + continue; + } + + properties_.emplace_back(this, property); + drmModeFreeProperty(property); + + Property &obj = properties_.back(); + objects_[obj.id()] = &obj; + } + + /* Finally, perform all delayed setup of mode objects. */ + for (auto &object : objects_) { + ret = object.second->setup(); + if (ret < 0) { + std::cerr + << "Failed to setup object " << object.second->id() + << ": " << strerror(-ret) << std::endl; + return ret; + } + } + + return 0; +} + +const Object *Device::object(uint32_t id) +{ + const auto iter = objects_.find(id); + if (iter == objects_.end()) + return nullptr; + + return iter->second; +} + +std::unique_ptr Device::createFrameBuffer( + const libcamera::FrameBuffer &buffer, + const libcamera::PixelFormat &format, + const libcamera::Size &size, + const std::array &strides) +{ + std::unique_ptr fb{ new FrameBuffer(this) }; + + uint32_t handles[4] = {}; + uint32_t offsets[4] = {}; + int ret; + + const std::vector &planes = buffer.planes(); + + unsigned int i = 0; + for (const libcamera::FrameBuffer::Plane &plane : planes) { + int fd = plane.fd.get(); + uint32_t handle; + + auto iter = fb->planes_.find(fd); + if (iter == fb->planes_.end()) { + ret = drmPrimeFDToHandle(fd_, plane.fd.get(), &handle); + if (ret < 0) { + ret = -errno; + std::cerr + << "Unable to import framebuffer dmabuf: " + << strerror(-ret) << std::endl; + return nullptr; + } + + fb->planes_[fd] = { handle }; + } else { + handle = iter->second.handle; + } + + handles[i] = handle; + offsets[i] = plane.offset; + ++i; + } + + ret = drmModeAddFB2(fd_, size.width, size.height, format.fourcc(), handles, + strides.data(), offsets, &fb->id_, 0); + if (ret < 0) { + ret = -errno; + std::cerr + << "Failed to add framebuffer: " + << strerror(-ret) << std::endl; + return nullptr; + } + + return fb; +} + +void Device::drmEvent() +{ + drmEventContext ctx{}; + ctx.version = DRM_EVENT_CONTEXT_VERSION; + ctx.page_flip_handler = &Device::pageFlipComplete; + + drmHandleEvent(fd_, &ctx); +} + +void Device::pageFlipComplete([[maybe_unused]] int fd, + [[maybe_unused]] unsigned int sequence, + [[maybe_unused]] unsigned int tv_sec, + [[maybe_unused]] unsigned int tv_usec, + void *user_data) +{ + AtomicRequest *request = static_cast(user_data); + request->device()->requestComplete.emit(request); +} + +} /* namespace DRM */ diff --git a/spider-cam/libcamera/src/apps/cam/drm.h b/spider-cam/libcamera/src/apps/cam/drm.h new file mode 100644 index 0000000..1ba83b6 --- /dev/null +++ b/spider-cam/libcamera/src/apps/cam/drm.h @@ -0,0 +1,334 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2021, Ideas on Board Oy + * + * DRM/KMS Helpers + */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +namespace libcamera { +class FrameBuffer; +class PixelFormat; +class Size; +} /* namespace libcamera */ + +namespace DRM { + +class Device; +class Plane; +class Property; +class PropertyValue; + +class Object +{ +public: + enum Type { + TypeCrtc = DRM_MODE_OBJECT_CRTC, + TypeConnector = DRM_MODE_OBJECT_CONNECTOR, + TypeEncoder = DRM_MODE_OBJECT_ENCODER, + 
TypeMode = DRM_MODE_OBJECT_MODE, + TypeProperty = DRM_MODE_OBJECT_PROPERTY, + TypeFb = DRM_MODE_OBJECT_FB, + TypeBlob = DRM_MODE_OBJECT_BLOB, + TypePlane = DRM_MODE_OBJECT_PLANE, + TypeAny = DRM_MODE_OBJECT_ANY, + }; + + Object(Device *dev, uint32_t id, Type type); + virtual ~Object(); + + Device *device() const { return dev_; } + uint32_t id() const { return id_; } + Type type() const { return type_; } + + const Property *property(const std::string &name) const; + const PropertyValue *propertyValue(const std::string &name) const; + const std::vector &properties() const { return properties_; } + +protected: + virtual int setup() + { + return 0; + } + + uint32_t id_; + +private: + friend Device; + + Device *dev_; + Type type_; + std::vector properties_; +}; + +class Property : public Object +{ +public: + enum Type { + TypeUnknown = 0, + TypeRange, + TypeEnum, + TypeBlob, + TypeBitmask, + TypeObject, + TypeSignedRange, + }; + + Property(Device *dev, drmModePropertyRes *property); + + Type type() const { return type_; } + const std::string &name() const { return name_; } + + bool isImmutable() const { return flags_ & DRM_MODE_PROP_IMMUTABLE; } + + const std::vector values() const { return values_; } + const std::map &enums() const { return enums_; } + const std::vector blobs() const { return blobs_; } + +private: + Type type_; + std::string name_; + uint32_t flags_; + std::vector values_; + std::map enums_; + std::vector blobs_; +}; + +class PropertyValue +{ +public: + PropertyValue(uint32_t id, uint64_t value) + : id_(id), value_(value) + { + } + + uint32_t id() const { return id_; } + uint32_t value() const { return value_; } + +private: + uint32_t id_; + uint64_t value_; +}; + +class Blob : public Object +{ +public: + Blob(Device *dev, const libcamera::Span &data); + ~Blob(); + + bool isValid() const { return id() != 0; } +}; + +class Mode : public drmModeModeInfo +{ +public: + Mode(const drmModeModeInfo &mode); + + std::unique_ptr toBlob(Device *dev) const; +}; + +class Crtc : public Object +{ +public: + Crtc(Device *dev, const drmModeCrtc *crtc, unsigned int index); + + unsigned int index() const { return index_; } + const std::vector &planes() const { return planes_; } + +private: + friend Device; + + unsigned int index_; + std::vector planes_; +}; + +class Encoder : public Object +{ +public: + Encoder(Device *dev, const drmModeEncoder *encoder); + + uint32_t type() const { return type_; } + + const std::vector &possibleCrtcs() const { return possibleCrtcs_; } + +private: + uint32_t type_; + std::vector possibleCrtcs_; +}; + +class Connector : public Object +{ +public: + enum Status { + Connected, + Disconnected, + Unknown, + }; + + Connector(Device *dev, const drmModeConnector *connector); + + uint32_t type() const { return type_; } + const std::string &name() const { return name_; } + + Status status() const { return status_; } + + const std::vector &encoders() const { return encoders_; } + const std::vector &modes() const { return modes_; } + +private: + uint32_t type_; + std::string name_; + Status status_; + std::vector encoders_; + std::vector modes_; +}; + +class Plane : public Object +{ +public: + enum Type { + TypeOverlay, + TypePrimary, + TypeCursor, + }; + + Plane(Device *dev, const drmModePlane *plane); + + Type type() const { return type_; } + const std::vector &formats() const { return formats_; } + const std::vector &possibleCrtcs() const { return possibleCrtcs_; } + + bool supportsFormat(const libcamera::PixelFormat &format) const; + +protected: + int setup() override; + 
+private: + friend class Device; + + Type type_; + std::vector formats_; + std::vector possibleCrtcs_; + uint32_t possibleCrtcsMask_; +}; + +class FrameBuffer : public Object +{ +public: + struct Plane { + uint32_t handle; + }; + + ~FrameBuffer(); + +private: + friend class Device; + + FrameBuffer(Device *dev); + + std::map planes_; +}; + +class AtomicRequest +{ +public: + enum Flags { + FlagAllowModeset = (1 << 0), + FlagAsync = (1 << 1), + FlagTestOnly = (1 << 2), + }; + + AtomicRequest(Device *dev); + ~AtomicRequest(); + + Device *device() const { return dev_; } + bool isValid() const { return valid_; } + + int addProperty(const Object *object, const std::string &property, + uint64_t value); + int addProperty(const Object *object, const std::string &property, + std::unique_ptr blob); + int commit(unsigned int flags = 0); + +private: + AtomicRequest(const AtomicRequest &) = delete; + AtomicRequest(const AtomicRequest &&) = delete; + AtomicRequest &operator=(const AtomicRequest &) = delete; + AtomicRequest &operator=(const AtomicRequest &&) = delete; + + int addProperty(uint32_t object, uint32_t property, uint64_t value); + + Device *dev_; + bool valid_; + drmModeAtomicReq *request_; + std::list> blobs_; +}; + +class Device +{ +public: + Device(); + ~Device(); + + int init(); + + int fd() const { return fd_; } + + const std::list &crtcs() const { return crtcs_; } + const std::list &encoders() const { return encoders_; } + const std::list &connectors() const { return connectors_; } + const std::list &planes() const { return planes_; } + const std::list &properties() const { return properties_; } + + const Object *object(uint32_t id); + + std::unique_ptr createFrameBuffer( + const libcamera::FrameBuffer &buffer, + const libcamera::PixelFormat &format, + const libcamera::Size &size, + const std::array &strides); + + libcamera::Signal requestComplete; + +private: + Device(const Device &) = delete; + Device(const Device &&) = delete; + Device &operator=(const Device &) = delete; + Device &operator=(const Device &&) = delete; + + int openCard(); + int getResources(); + + void drmEvent(); + static void pageFlipComplete(int fd, unsigned int sequence, + unsigned int tv_sec, unsigned int tv_usec, + void *user_data); + + int fd_; + + std::list crtcs_; + std::list encoders_; + std::list connectors_; + std::list planes_; + std::list properties_; + + std::map objects_; +}; + +} /* namespace DRM */ diff --git a/spider-cam/libcamera/src/apps/cam/file_sink.cpp b/spider-cam/libcamera/src/apps/cam/file_sink.cpp new file mode 100644 index 0000000..3e000d2 --- /dev/null +++ b/spider-cam/libcamera/src/apps/cam/file_sink.cpp @@ -0,0 +1,158 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2019, Google Inc. 
+ * + * File Sink + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "../common/dng_writer.h" +#include "../common/image.h" +#include "../common/ppm_writer.h" + +#include "file_sink.h" + +using namespace libcamera; + +FileSink::FileSink([[maybe_unused]] const libcamera::Camera *camera, + const std::map &streamNames, + const std::string &pattern) + : +#ifdef HAVE_TIFF + camera_(camera), +#endif + streamNames_(streamNames), pattern_(pattern) +{ +} + +FileSink::~FileSink() +{ +} + +int FileSink::configure(const libcamera::CameraConfiguration &config) +{ + int ret = FrameSink::configure(config); + if (ret < 0) + return ret; + + return 0; +} + +void FileSink::mapBuffer(FrameBuffer *buffer) +{ + std::unique_ptr image = + Image::fromFrameBuffer(buffer, Image::MapMode::ReadOnly); + assert(image != nullptr); + + mappedBuffers_[buffer] = std::move(image); +} + +bool FileSink::processRequest(Request *request) +{ + for (auto [stream, buffer] : request->buffers()) + writeBuffer(stream, buffer, request->metadata()); + + return true; +} + +void FileSink::writeBuffer(const Stream *stream, FrameBuffer *buffer, + [[maybe_unused]] const ControlList &metadata) +{ + std::string filename; + size_t pos; + int fd, ret = 0; + + if (!pattern_.empty()) + filename = pattern_; + +#ifdef HAVE_TIFF + bool dng = filename.find(".dng", filename.size() - 4) != std::string::npos; +#endif /* HAVE_TIFF */ + bool ppm = filename.find(".ppm", filename.size() - 4) != std::string::npos; + + if (filename.empty() || filename.back() == '/') + filename += "frame-#.bin"; + + pos = filename.find_first_of('#'); + if (pos != std::string::npos) { + std::stringstream ss; + ss << streamNames_[stream] << "-" << std::setw(6) + << std::setfill('0') << buffer->metadata().sequence; + filename.replace(pos, 1, ss.str()); + } + + Image *image = mappedBuffers_[buffer].get(); + +#ifdef HAVE_TIFF + if (dng) { + ret = DNGWriter::write(filename.c_str(), camera_, + stream->configuration(), metadata, + buffer, image->data(0).data()); + if (ret < 0) + std::cerr << "failed to write DNG file `" << filename + << "'" << std::endl; + + return; + } +#endif /* HAVE_TIFF */ + if (ppm) { + ret = PPMWriter::write(filename.c_str(), stream->configuration(), + image->data(0)); + if (ret < 0) + std::cerr << "failed to write PPM file `" << filename + << "'" << std::endl; + + return; + } + + fd = open(filename.c_str(), O_CREAT | O_WRONLY | + (pos == std::string::npos ? O_APPEND : O_TRUNC), + S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH); + if (fd == -1) { + ret = -errno; + std::cerr << "failed to open file " << filename << ": " + << strerror(-ret) << std::endl; + return; + } + + for (unsigned int i = 0; i < buffer->planes().size(); ++i) { + /* + * This was formerly a local "const FrameMetadata::Plane &" + * however this causes a false positive warning for dangling + * references on gcc 13. 
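+		 * Copying the bytesused value instead of binding a reference
+		 * avoids the warning without changing behaviour.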
+ */ + const unsigned int bytesused = buffer->metadata().planes()[i].bytesused; + + Span data = image->data(i); + const unsigned int length = std::min(bytesused, data.size()); + + if (bytesused > data.size()) + std::cerr << "payload size " << bytesused + << " larger than plane size " << data.size() + << std::endl; + + ret = ::write(fd, data.data(), length); + if (ret < 0) { + ret = -errno; + std::cerr << "write error: " << strerror(-ret) + << std::endl; + break; + } else if (ret != (int)length) { + std::cerr << "write error: only " << ret + << " bytes written instead of " + << length << std::endl; + break; + } + } + + close(fd); +} diff --git a/spider-cam/libcamera/src/apps/cam/file_sink.h b/spider-cam/libcamera/src/apps/cam/file_sink.h new file mode 100644 index 0000000..9d56078 --- /dev/null +++ b/spider-cam/libcamera/src/apps/cam/file_sink.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2019, Google Inc. + * + * File Sink + */ + +#pragma once + +#include +#include +#include + +#include + +#include "frame_sink.h" + +class Image; + +class FileSink : public FrameSink +{ +public: + FileSink(const libcamera::Camera *camera, + const std::map &streamNames, + const std::string &pattern = ""); + ~FileSink(); + + int configure(const libcamera::CameraConfiguration &config) override; + + void mapBuffer(libcamera::FrameBuffer *buffer) override; + + bool processRequest(libcamera::Request *request) override; + +private: + void writeBuffer(const libcamera::Stream *stream, + libcamera::FrameBuffer *buffer, + const libcamera::ControlList &metadata); + +#ifdef HAVE_TIFF + const libcamera::Camera *camera_; +#endif + std::map streamNames_; + std::string pattern_; + std::map> mappedBuffers_; +}; diff --git a/spider-cam/libcamera/src/apps/cam/frame_sink.cpp b/spider-cam/libcamera/src/apps/cam/frame_sink.cpp new file mode 100644 index 0000000..68d6f2c --- /dev/null +++ b/spider-cam/libcamera/src/apps/cam/frame_sink.cpp @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2021, Ideas on Board Oy + * + * Base Frame Sink Class + */ + +#include "frame_sink.h" + +/** + * \class FrameSink + * \brief Abstract class to model a consumer of frames + * + * The FrameSink class models the consumer that processes frames after a request + * completes. It receives requests through processRequest(), and processes them + * synchronously or asynchronously. This allows frame sinks to hold onto frames + * for an extended period of time, for instance to display them until a new + * frame arrives. + * + * A frame sink processes whole requests, and is solely responsible for deciding + * how to handle different frame buffers in case multiple streams are captured. + */ + +FrameSink::~FrameSink() +{ +} + +int FrameSink::configure([[maybe_unused]] const libcamera::CameraConfiguration &config) +{ + return 0; +} + +void FrameSink::mapBuffer([[maybe_unused]] libcamera::FrameBuffer *buffer) +{ +} + +int FrameSink::start() +{ + return 0; +} + +int FrameSink::stop() +{ + return 0; +} + +/** + * \fn FrameSink::processRequest() + * \param[in] request The request + * + * This function is called to instruct the sink to process a request. The sink + * may process the request synchronously or queue it for asynchronous + * processing. + * + * When the request is processed synchronously, this function shall return true. + * The \a request shall not be accessed by the FrameSink after the function + * returns. 
+ * + * When the request is processed asynchronously, the FrameSink temporarily takes + * ownership of the \a request. The function shall return false, and the + * FrameSink shall emit the requestProcessed signal when the request processing + * completes. If the stop() function is called before the request processing + * completes, it shall release the request synchronously. + * + * \return True if the request has been processed synchronously, false if + * processing has been queued + */ diff --git a/spider-cam/libcamera/src/apps/cam/frame_sink.h b/spider-cam/libcamera/src/apps/cam/frame_sink.h new file mode 100644 index 0000000..11105c6 --- /dev/null +++ b/spider-cam/libcamera/src/apps/cam/frame_sink.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2021, Ideas on Board Oy + * + * Base Frame Sink Class + */ + +#pragma once + +#include + +namespace libcamera { +class CameraConfiguration; +class FrameBuffer; +class Request; +} /* namespace libcamera */ + +class FrameSink +{ +public: + virtual ~FrameSink(); + + virtual int configure(const libcamera::CameraConfiguration &config); + + virtual void mapBuffer(libcamera::FrameBuffer *buffer); + + virtual int start(); + virtual int stop(); + + virtual bool processRequest(libcamera::Request *request) = 0; + libcamera::Signal requestProcessed; +}; diff --git a/spider-cam/libcamera/src/apps/cam/kms_sink.cpp b/spider-cam/libcamera/src/apps/cam/kms_sink.cpp new file mode 100644 index 0000000..672c985 --- /dev/null +++ b/spider-cam/libcamera/src/apps/cam/kms_sink.cpp @@ -0,0 +1,536 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2021, Ideas on Board Oy + * + * KMS Sink + */ + +#include "kms_sink.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "drm.h" + +KMSSink::KMSSink(const std::string &connectorName) + : connector_(nullptr), crtc_(nullptr), plane_(nullptr), mode_(nullptr) +{ + int ret = dev_.init(); + if (ret < 0) + return; + + /* + * Find the requested connector. If no specific connector is requested, + * pick the first connected connector or, if no connector is connected, + * the first connector with unknown status. + */ + for (const DRM::Connector &conn : dev_.connectors()) { + if (!connectorName.empty()) { + if (conn.name() != connectorName) + continue; + + connector_ = &conn; + break; + } + + if (conn.status() == DRM::Connector::Connected) { + connector_ = &conn; + break; + } + + if (!connector_ && conn.status() == DRM::Connector::Unknown) + connector_ = &conn; + } + + if (!connector_) { + if (!connectorName.empty()) + std::cerr + << "Connector " << connectorName << " not found" + << std::endl; + else + std::cerr << "No connected connector found" << std::endl; + return; + } + + dev_.requestComplete.connect(this, &KMSSink::requestComplete); +} + +void KMSSink::mapBuffer(libcamera::FrameBuffer *buffer) +{ + std::array strides = {}; + + /* \todo Should libcamera report per-plane strides ? 
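+	 * As a sanity check of the heuristic below: for a 1920-wide NV12
+	 * buffer the interleaved CbCr plane needs stride 1920 (multiplier
+	 * 2), YUV420's separate Cb/Cr planes need 960 (multiplier 1), and
+	 * NV24's full-resolution CbCr plane needs 3840 (multiplier 4).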
*/ + unsigned int uvStrideMultiplier; + + switch (format_) { + case libcamera::formats::NV24: + case libcamera::formats::NV42: + uvStrideMultiplier = 4; + break; + case libcamera::formats::YUV420: + case libcamera::formats::YVU420: + case libcamera::formats::YUV422: + uvStrideMultiplier = 1; + break; + default: + uvStrideMultiplier = 2; + break; + } + + strides[0] = stride_; + for (unsigned int i = 1; i < buffer->planes().size(); ++i) + strides[i] = stride_ * uvStrideMultiplier / 2; + + std::unique_ptr drmBuffer = + dev_.createFrameBuffer(*buffer, format_, size_, strides); + if (!drmBuffer) + return; + + buffers_.emplace(std::piecewise_construct, + std::forward_as_tuple(buffer), + std::forward_as_tuple(std::move(drmBuffer))); +} + +int KMSSink::configure(const libcamera::CameraConfiguration &config) +{ + if (!connector_) + return -EINVAL; + + crtc_ = nullptr; + plane_ = nullptr; + mode_ = nullptr; + + const libcamera::StreamConfiguration &cfg = config.at(0); + + /* Find the best mode for the stream size. */ + const std::vector &modes = connector_->modes(); + + unsigned int cfgArea = cfg.size.width * cfg.size.height; + unsigned int bestDistance = UINT_MAX; + + for (const DRM::Mode &mode : modes) { + unsigned int modeArea = mode.hdisplay * mode.vdisplay; + unsigned int distance = modeArea > cfgArea ? modeArea - cfgArea + : cfgArea - modeArea; + + if (distance < bestDistance) { + mode_ = &mode; + bestDistance = distance; + + /* + * If the sizes match exactly, there will be no better + * match. + */ + if (distance == 0) + break; + } + } + + if (!mode_) { + std::cerr << "No modes\n"; + return -EINVAL; + } + + int ret = configurePipeline(cfg.pixelFormat); + if (ret < 0) + return ret; + + size_ = cfg.size; + stride_ = cfg.stride; + + /* Configure color space. */ + colorEncoding_ = std::nullopt; + colorRange_ = std::nullopt; + + if (cfg.colorSpace->ycbcrEncoding == libcamera::ColorSpace::YcbcrEncoding::None) + return 0; + + /* + * The encoding and range enums are defined in the kernel but not + * exposed in public headers. + */ + enum drm_color_encoding { + DRM_COLOR_YCBCR_BT601, + DRM_COLOR_YCBCR_BT709, + DRM_COLOR_YCBCR_BT2020, + }; + + enum drm_color_range { + DRM_COLOR_YCBCR_LIMITED_RANGE, + DRM_COLOR_YCBCR_FULL_RANGE, + }; + + const DRM::Property *colorEncoding = plane_->property("COLOR_ENCODING"); + const DRM::Property *colorRange = plane_->property("COLOR_RANGE"); + + if (colorEncoding) { + drm_color_encoding encoding; + + switch (cfg.colorSpace->ycbcrEncoding) { + case libcamera::ColorSpace::YcbcrEncoding::Rec601: + default: + encoding = DRM_COLOR_YCBCR_BT601; + break; + case libcamera::ColorSpace::YcbcrEncoding::Rec709: + encoding = DRM_COLOR_YCBCR_BT709; + break; + case libcamera::ColorSpace::YcbcrEncoding::Rec2020: + encoding = DRM_COLOR_YCBCR_BT2020; + break; + } + + for (const auto &[id, name] : colorEncoding->enums()) { + if (id == encoding) { + colorEncoding_ = encoding; + break; + } + } + } + + if (colorRange) { + drm_color_range range; + + switch (cfg.colorSpace->range) { + case libcamera::ColorSpace::Range::Limited: + default: + range = DRM_COLOR_YCBCR_LIMITED_RANGE; + break; + case libcamera::ColorSpace::Range::Full: + range = DRM_COLOR_YCBCR_FULL_RANGE; + break; + } + + for (const auto &[id, name] : colorRange->enums()) { + if (id == range) { + colorRange_ = range; + break; + } + } + } + + if (!colorEncoding_ || !colorRange_) + std::cerr << "Color space " << cfg.colorSpace->toString() + << " not supported by the display device." + << " Colors may be wrong." 
<< std::endl; + + return 0; +} + +int KMSSink::selectPipeline(const libcamera::PixelFormat &format) +{ + /* + * If the requested format has an alpha channel, also consider the X + * variant. + */ + libcamera::PixelFormat xFormat; + + switch (format) { + case libcamera::formats::ABGR8888: + xFormat = libcamera::formats::XBGR8888; + break; + case libcamera::formats::ARGB8888: + xFormat = libcamera::formats::XRGB8888; + break; + case libcamera::formats::BGRA8888: + xFormat = libcamera::formats::BGRX8888; + break; + case libcamera::formats::RGBA8888: + xFormat = libcamera::formats::RGBX8888; + break; + } + + /* + * Find a CRTC and plane suitable for the request format and the + * connector at the end of the pipeline. Restrict the search to primary + * planes for now. + */ + for (const DRM::Encoder *encoder : connector_->encoders()) { + for (const DRM::Crtc *crtc : encoder->possibleCrtcs()) { + for (const DRM::Plane *plane : crtc->planes()) { + if (plane->type() != DRM::Plane::TypePrimary) + continue; + + if (plane->supportsFormat(format)) { + crtc_ = crtc; + plane_ = plane; + format_ = format; + return 0; + } + + if (plane->supportsFormat(xFormat)) { + crtc_ = crtc; + plane_ = plane; + format_ = xFormat; + return 0; + } + } + } + } + + return -EPIPE; +} + +int KMSSink::configurePipeline(const libcamera::PixelFormat &format) +{ + const int ret = selectPipeline(format); + if (ret) { + std::cerr + << "Unable to find display pipeline for format " + << format << std::endl; + + return ret; + } + + std::cout + << "Using KMS plane " << plane_->id() << ", CRTC " << crtc_->id() + << ", connector " << connector_->name() + << " (" << connector_->id() << "), mode " << mode_->hdisplay + << "x" << mode_->vdisplay << "@" << mode_->vrefresh << std::endl; + + return 0; +} + +int KMSSink::start() +{ + int ret = FrameSink::start(); + if (ret < 0) + return ret; + + /* Disable all CRTCs and planes to start from a known valid state. */ + DRM::AtomicRequest request(&dev_); + + for (const DRM::Crtc &crtc : dev_.crtcs()) + request.addProperty(&crtc, "ACTIVE", 0); + + for (const DRM::Plane &plane : dev_.planes()) { + request.addProperty(&plane, "CRTC_ID", 0); + request.addProperty(&plane, "FB_ID", 0); + } + + ret = request.commit(DRM::AtomicRequest::FlagAllowModeset); + if (ret < 0) { + std::cerr + << "Failed to disable CRTCs and planes: " + << strerror(-ret) << std::endl; + return ret; + } + + return 0; +} + +int KMSSink::stop() +{ + /* Display pipeline. */ + DRM::AtomicRequest request(&dev_); + + request.addProperty(connector_, "CRTC_ID", 0); + request.addProperty(crtc_, "ACTIVE", 0); + request.addProperty(crtc_, "MODE_ID", 0); + request.addProperty(plane_, "CRTC_ID", 0); + request.addProperty(plane_, "FB_ID", 0); + + int ret = request.commit(DRM::AtomicRequest::FlagAllowModeset); + if (ret < 0) { + std::cerr + << "Failed to stop display pipeline: " + << strerror(-ret) << std::endl; + return ret; + } + + /* Free all buffers. 
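+	 * Drop the pending, queued and active requests to release their DRM
+	 * atomic requests, then destroy the DRM framebuffers created in
+	 * mapBuffer().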
*/ + pending_.reset(); + queued_.reset(); + active_.reset(); + buffers_.clear(); + + return FrameSink::stop(); +} + +bool KMSSink::testModeSet(DRM::FrameBuffer *drmBuffer, + const libcamera::Rectangle &src, + const libcamera::Rectangle &dst) +{ + DRM::AtomicRequest drmRequest{ &dev_ }; + + drmRequest.addProperty(connector_, "CRTC_ID", crtc_->id()); + + drmRequest.addProperty(crtc_, "ACTIVE", 1); + drmRequest.addProperty(crtc_, "MODE_ID", mode_->toBlob(&dev_)); + + drmRequest.addProperty(plane_, "CRTC_ID", crtc_->id()); + drmRequest.addProperty(plane_, "FB_ID", drmBuffer->id()); + drmRequest.addProperty(plane_, "SRC_X", src.x << 16); + drmRequest.addProperty(plane_, "SRC_Y", src.y << 16); + drmRequest.addProperty(plane_, "SRC_W", src.width << 16); + drmRequest.addProperty(plane_, "SRC_H", src.height << 16); + drmRequest.addProperty(plane_, "CRTC_X", dst.x); + drmRequest.addProperty(plane_, "CRTC_Y", dst.y); + drmRequest.addProperty(plane_, "CRTC_W", dst.width); + drmRequest.addProperty(plane_, "CRTC_H", dst.height); + + return !drmRequest.commit(DRM::AtomicRequest::FlagAllowModeset | + DRM::AtomicRequest::FlagTestOnly); +} + +bool KMSSink::setupComposition(DRM::FrameBuffer *drmBuffer) +{ + /* + * Test composition options, from most to least desirable, to select the + * best one. + */ + const libcamera::Rectangle framebuffer{ size_ }; + const libcamera::Rectangle display{ 0, 0, mode_->hdisplay, mode_->vdisplay }; + + /* 1. Scale the frame buffer to full screen, preserving aspect ratio. */ + libcamera::Rectangle src = framebuffer; + libcamera::Rectangle dst = display.size().boundedToAspectRatio(framebuffer.size()) + .centeredTo(display.center()); + + if (testModeSet(drmBuffer, src, dst)) { + std::cout << "KMS: full-screen scaled output, square pixels" + << std::endl; + src_ = src; + dst_ = dst; + return true; + } + + /* + * 2. Scale the frame buffer to full screen, without preserving aspect + * ratio. + */ + src = framebuffer; + dst = display; + + if (testModeSet(drmBuffer, src, dst)) { + std::cout << "KMS: full-screen scaled output, non-square pixels" + << std::endl; + src_ = src; + dst_ = dst; + return true; + } + + /* 3. Center the frame buffer on the display. */ + src = display.size().centeredTo(framebuffer.center()).boundedTo(framebuffer); + dst = framebuffer.size().centeredTo(display.center()).boundedTo(display); + + if (testModeSet(drmBuffer, src, dst)) { + std::cout << "KMS: centered output" << std::endl; + src_ = src; + dst_ = dst; + return true; + } + + /* 4. Align the frame buffer on the top-left of the display. */ + src = framebuffer.boundedTo(display); + dst = display.boundedTo(framebuffer); + + if (testModeSet(drmBuffer, src, dst)) { + std::cout << "KMS: top-left aligned output" << std::endl; + src_ = src; + dst_ = dst; + return true; + } + + return false; +} + +bool KMSSink::processRequest(libcamera::Request *camRequest) +{ + /* + * Perform a very crude rate adaptation by simply dropping the request + * if the display queue is full. + */ + if (pending_) + return true; + + libcamera::FrameBuffer *buffer = camRequest->buffers().begin()->second; + auto iter = buffers_.find(buffer); + if (iter == buffers_.end()) + return true; + + DRM::FrameBuffer *drmBuffer = iter->second.get(); + + unsigned int flags = DRM::AtomicRequest::FlagAsync; + std::unique_ptr drmRequest = + std::make_unique(&dev_); + drmRequest->addProperty(plane_, "FB_ID", drmBuffer->id()); + + if (!active_ && !queued_) { + /* Enable the display pipeline on the first frame. 
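+		 * The first commit performs a full modeset: it selects the
+		 * composition rectangles, routes the plane to the CRTC and
+		 * sets the mode, so it is committed with FlagAllowModeset.
+		 * Later commits only flip FB_ID asynchronously.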
*/ + if (!setupComposition(drmBuffer)) { + std::cerr << "Failed to setup composition" << std::endl; + return true; + } + + drmRequest->addProperty(connector_, "CRTC_ID", crtc_->id()); + + drmRequest->addProperty(crtc_, "ACTIVE", 1); + drmRequest->addProperty(crtc_, "MODE_ID", mode_->toBlob(&dev_)); + + drmRequest->addProperty(plane_, "CRTC_ID", crtc_->id()); + drmRequest->addProperty(plane_, "SRC_X", src_.x << 16); + drmRequest->addProperty(plane_, "SRC_Y", src_.y << 16); + drmRequest->addProperty(plane_, "SRC_W", src_.width << 16); + drmRequest->addProperty(plane_, "SRC_H", src_.height << 16); + drmRequest->addProperty(plane_, "CRTC_X", dst_.x); + drmRequest->addProperty(plane_, "CRTC_Y", dst_.y); + drmRequest->addProperty(plane_, "CRTC_W", dst_.width); + drmRequest->addProperty(plane_, "CRTC_H", dst_.height); + + if (colorEncoding_) + drmRequest->addProperty(plane_, "COLOR_ENCODING", *colorEncoding_); + if (colorRange_) + drmRequest->addProperty(plane_, "COLOR_RANGE", *colorRange_); + + flags |= DRM::AtomicRequest::FlagAllowModeset; + } + + pending_ = std::make_unique(std::move(drmRequest), camRequest); + + std::lock_guard lock(lock_); + + if (!queued_) { + int ret = pending_->drmRequest_->commit(flags); + if (ret < 0) { + std::cerr + << "Failed to commit atomic request: " + << strerror(-ret) << std::endl; + /* \todo Implement error handling */ + } + + queued_ = std::move(pending_); + } + + return false; +} + +void KMSSink::requestComplete([[maybe_unused]] DRM::AtomicRequest *request) +{ + std::lock_guard lock(lock_); + + assert(queued_ && queued_->drmRequest_.get() == request); + + /* Complete the active request, if any. */ + if (active_) + requestProcessed.emit(active_->camRequest_); + + /* The queued request becomes active. */ + active_ = std::move(queued_); + + /* Queue the pending request, if any. 
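+	 * Requests move through three slots: pending_ has not been committed
+	 * to KMS yet, queued_ has been committed and is waiting for the page
+	 * flip, and active_ is currently on screen. Completing the queued
+	 * request releases the active one and lets the pending one be
+	 * committed.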
+	 */
+	if (pending_) {
+		pending_->drmRequest_->commit(DRM::AtomicRequest::FlagAsync);
+		queued_ = std::move(pending_);
+	}
+}
diff --git a/spider-cam/libcamera/src/apps/cam/kms_sink.h b/spider-cam/libcamera/src/apps/cam/kms_sink.h
new file mode 100644
index 0000000..4b7b4c2
--- /dev/null
+++ b/spider-cam/libcamera/src/apps/cam/kms_sink.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2021, Ideas on Board Oy
+ *
+ * KMS Sink
+ */
+
+#pragma once
+
+#include <map>
+#include <memory>
+#include <mutex>
+#include <optional>
+#include <string>
+#include <utility>
+
+#include <libcamera/base/signal.h>
+
+#include <libcamera/geometry.h>
+#include <libcamera/pixel_format.h>
+
+#include "drm.h"
+#include "frame_sink.h"
+
+class KMSSink : public FrameSink
+{
+public:
+	KMSSink(const std::string &connectorName);
+
+	void mapBuffer(libcamera::FrameBuffer *buffer) override;
+
+	int configure(const libcamera::CameraConfiguration &config) override;
+	int start() override;
+	int stop() override;
+
+	bool processRequest(libcamera::Request *request) override;
+
+private:
+	class Request
+	{
+	public:
+		Request(std::unique_ptr<DRM::AtomicRequest> drmRequest,
+			libcamera::Request *camRequest)
+			: drmRequest_(std::move(drmRequest)), camRequest_(camRequest)
+		{
+		}
+
+		std::unique_ptr<DRM::AtomicRequest> drmRequest_;
+		libcamera::Request *camRequest_;
+	};
+
+	int selectPipeline(const libcamera::PixelFormat &format);
+	int configurePipeline(const libcamera::PixelFormat &format);
+	bool testModeSet(DRM::FrameBuffer *drmBuffer,
+			 const libcamera::Rectangle &src,
+			 const libcamera::Rectangle &dst);
+	bool setupComposition(DRM::FrameBuffer *drmBuffer);
+
+	void requestComplete(DRM::AtomicRequest *request);
+
+	DRM::Device dev_;
+
+	const DRM::Connector *connector_;
+	const DRM::Crtc *crtc_;
+	const DRM::Plane *plane_;
+	const DRM::Mode *mode_;
+
+	libcamera::PixelFormat format_;
+	libcamera::Size size_;
+	unsigned int stride_;
+	std::optional<unsigned int> colorEncoding_;
+	std::optional<unsigned int> colorRange_;
+
+	libcamera::Rectangle src_;
+	libcamera::Rectangle dst_;
+
+	std::map<libcamera::FrameBuffer *, std::unique_ptr<DRM::FrameBuffer>> buffers_;
+
+	std::mutex lock_;
+	std::unique_ptr<Request> pending_;
+	std::unique_ptr<Request> queued_;
+	std::unique_ptr<Request> active_;
+};
diff --git a/spider-cam/libcamera/src/apps/cam/main.cpp b/spider-cam/libcamera/src/apps/cam/main.cpp
new file mode 100644
index 0000000..460dbc8
--- /dev/null
+++ b/spider-cam/libcamera/src/apps/cam/main.cpp
@@ -0,0 +1,374 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ * + * cam - The libcamera swiss army knife + */ + +#include +#include +#include +#include +#include + +#include +#include + +#include "../common/event_loop.h" +#include "../common/options.h" +#include "../common/stream_options.h" + +#include "camera_session.h" +#include "main.h" + +using namespace libcamera; + +class CamApp +{ +public: + CamApp(); + + static CamApp *instance(); + + int init(int argc, char **argv); + void cleanup(); + + int exec(); + void quit(); + +private: + void cameraAdded(std::shared_ptr cam); + void cameraRemoved(std::shared_ptr cam); + void captureDone(); + int parseOptions(int argc, char *argv[]); + int run(); + + static std::string cameraName(const Camera *camera); + + static CamApp *app_; + OptionsParser::Options options_; + + std::unique_ptr cm_; + + std::atomic_uint loopUsers_; + EventLoop loop_; +}; + +CamApp *CamApp::app_ = nullptr; + +CamApp::CamApp() + : loopUsers_(0) +{ + CamApp::app_ = this; +} + +CamApp *CamApp::instance() +{ + return CamApp::app_; +} + +int CamApp::init(int argc, char **argv) +{ + int ret; + + ret = parseOptions(argc, argv); + if (ret < 0) + return ret; + + cm_ = std::make_unique(); + + ret = cm_->start(); + if (ret) { + std::cout << "Failed to start camera manager: " + << strerror(-ret) << std::endl; + return ret; + } + + return 0; +} + +void CamApp::cleanup() +{ + cm_->stop(); +} + +int CamApp::exec() +{ + int ret; + + ret = run(); + cleanup(); + + return ret; +} + +void CamApp::quit() +{ + loop_.exit(); +} + +int CamApp::parseOptions(int argc, char *argv[]) +{ + StreamKeyValueParser streamKeyValue; + + OptionsParser parser; + parser.addOption(OptCamera, OptionString, + "Specify which camera to operate on, by id or by index", "camera", + ArgumentRequired, "camera", true); + parser.addOption(OptHelp, OptionNone, "Display this help message", + "help"); + parser.addOption(OptInfo, OptionNone, + "Display information about stream(s)", "info"); + parser.addOption(OptList, OptionNone, "List all cameras", "list"); + parser.addOption(OptListControls, OptionNone, "List cameras controls", + "list-controls"); + parser.addOption(OptListProperties, OptionNone, "List cameras properties", + "list-properties"); + parser.addOption(OptMonitor, OptionNone, + "Monitor for hotplug and unplug camera events", + "monitor"); + + /* Sub-options of OptCamera: */ + parser.addOption(OptCapture, OptionInteger, + "Capture until interrupted by user or until frames captured", + "capture", ArgumentOptional, "count", false, + OptCamera); + + parser.addOption(OptOrientation, OptionString, + "Desired image orientation (rot0, rot180, mirror, flip)", + "orientation", ArgumentRequired, "orientation", false, + OptCamera); +#ifdef HAVE_KMS + parser.addOption(OptDisplay, OptionString, + "Display viewfinder through DRM/KMS on specified connector", + "display", ArgumentOptional, "connector", false, + OptCamera); +#endif + parser.addOption(OptFile, OptionString, + "Write captured frames to disk\n" + "If the file name ends with a '/', it sets the directory in which\n" + "to write files, using the default file name. Otherwise it sets the\n" + "full file path and name. 
The first '#' character in the file name\n" + "is expanded to the camera index, stream name and frame sequence number.\n" +#ifdef HAVE_TIFF + "If the file name ends with '.dng', then the frame will be written to\n" + "the output file(s) in DNG format.\n" +#endif + "If the file name ends with '.ppm', then the frame will be written to\n" + "the output file(s) in PPM format.\n" + "The default file name is 'frame-#.bin'.", + "file", ArgumentOptional, "filename", false, + OptCamera); +#ifdef HAVE_SDL + parser.addOption(OptSDL, OptionNone, "Display viewfinder through SDL", + "sdl", ArgumentNone, "", false, OptCamera); +#endif + parser.addOption(OptStream, &streamKeyValue, + "Set configuration of a camera stream", "stream", true, + OptCamera); + parser.addOption(OptStrictFormats, OptionNone, + "Do not allow requested stream format(s) to be adjusted", + "strict-formats", ArgumentNone, nullptr, false, + OptCamera); + parser.addOption(OptMetadata, OptionNone, + "Print the metadata for completed requests", + "metadata", ArgumentNone, nullptr, false, + OptCamera); + parser.addOption(OptCaptureScript, OptionString, + "Load a capture session configuration script from a file", + "script", ArgumentRequired, "script", false, + OptCamera); + + options_ = parser.parse(argc, argv); + if (!options_.valid()) + return -EINVAL; + + if (options_.empty() || options_.isSet(OptHelp)) { + parser.usage(); + return options_.empty() ? -EINVAL : -EINTR; + } + + return 0; +} + +void CamApp::cameraAdded(std::shared_ptr cam) +{ + std::cout << "Camera Added: " << cam->id() << std::endl; +} + +void CamApp::cameraRemoved(std::shared_ptr cam) +{ + std::cout << "Camera Removed: " << cam->id() << std::endl; +} + +void CamApp::captureDone() +{ + if (--loopUsers_ == 0) + EventLoop::instance()->exit(0); +} + +int CamApp::run() +{ + int ret; + + /* 1. List all cameras. */ + if (options_.isSet(OptList)) { + std::cout << "Available cameras:" << std::endl; + + unsigned int index = 1; + for (const std::shared_ptr &cam : cm_->cameras()) { + std::cout << index << ": " << cameraName(cam.get()) << std::endl; + index++; + } + } + + /* 2. Create the camera sessions. */ + std::vector> sessions; + + if (options_.isSet(OptCamera)) { + unsigned int index = 0; + + for (const OptionValue &camera : options_[OptCamera].toArray()) { + std::unique_ptr session = + std::make_unique(cm_.get(), + camera.toString(), + index, + camera.children()); + if (!session->isValid()) { + std::cout << "Failed to create camera session" << std::endl; + return -EINVAL; + } + + std::cout << "Using camera " << session->camera()->id() + << " as cam" << index << std::endl; + + session->captureDone.connect(this, &CamApp::captureDone); + + sessions.push_back(std::move(session)); + index++; + } + } + + /* 3. Print camera information. */ + if (options_.isSet(OptListControls) || + options_.isSet(OptListProperties) || + options_.isSet(OptInfo)) { + for (const auto &session : sessions) { + if (options_.isSet(OptListControls)) + session->listControls(); + if (options_.isSet(OptListProperties)) + session->listProperties(); + if (options_.isSet(OptInfo)) + session->infoConfiguration(); + } + } + + /* 4. Start capture. */ + for (const auto &session : sessions) { + if (!session->options().isSet(OptCapture)) + continue; + + ret = session->start(); + if (ret) { + std::cout << "Failed to start camera session" << std::endl; + return ret; + } + + loopUsers_++; + } + + /* 5. Enable hotplug monitoring. 
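+	 * Hotplug monitoring connects to the camera manager's cameraAdded
+	 * and cameraRemoved signals and takes a loop user reference, so the
+	 * event loop keeps running even when no capture session is active.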
+	 */
+	if (options_.isSet(OptMonitor)) {
+		std::cout << "Monitoring new hotplug and unplug events" << std::endl;
+		std::cout << "Press Ctrl-C to interrupt" << std::endl;
+
+		cm_->cameraAdded.connect(this, &CamApp::cameraAdded);
+		cm_->cameraRemoved.connect(this, &CamApp::cameraRemoved);
+
+		loopUsers_++;
+	}
+
+	if (loopUsers_)
+		loop_.exec();
+
+	/* 6. Stop capture. */
+	for (const auto &session : sessions) {
+		if (!session->options().isSet(OptCapture))
+			continue;
+
+		session->stop();
+	}
+
+	return 0;
+}
+
+std::string CamApp::cameraName(const Camera *camera)
+{
+	const ControlList &props = camera->properties();
+	bool addModel = true;
+	std::string name;
+
+	/*
+	 * Construct the name from the camera location, model and ID. The model
+	 * is only used if the location isn't present or is set to External.
+	 */
+	const auto &location = props.get(properties::Location);
+	if (location) {
+		switch (*location) {
+		case properties::CameraLocationFront:
+			addModel = false;
+			name = "Internal front camera ";
+			break;
+		case properties::CameraLocationBack:
+			addModel = false;
+			name = "Internal back camera ";
+			break;
+		case properties::CameraLocationExternal:
+			name = "External camera ";
+			break;
+		}
+	}
+
+	if (addModel) {
+		/*
+		 * If the camera location is not available, use the camera
+		 * model to build the camera name.
+		 */
+		const auto &model = props.get(properties::Model);
+		if (model)
+			name = "'" + *model + "' ";
+	}
+
+	name += "(" + camera->id() + ")";
+
+	return name;
+}
+
+namespace {
+
+void signalHandler([[maybe_unused]] int signal)
+{
+	std::cout << "Exiting" << std::endl;
+	CamApp::instance()->quit();
+}
+
+} /* namespace */
+
+int main(int argc, char **argv)
+{
+	CamApp app;
+	int ret;
+
+	ret = app.init(argc, argv);
+	if (ret)
+		return ret == -EINTR ? 0 : EXIT_FAILURE;
+
+	struct sigaction sa = {};
+	sa.sa_handler = &signalHandler;
+	sigaction(SIGINT, &sa, nullptr);
+
+	if (app.exec())
+		return EXIT_FAILURE;
+
+	return 0;
+}
diff --git a/spider-cam/libcamera/src/apps/cam/main.h b/spider-cam/libcamera/src/apps/cam/main.h
new file mode 100644
index 0000000..64e6a20
--- /dev/null
+++ b/spider-cam/libcamera/src/apps/cam/main.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ * + * Cam application + */ + +#pragma once + +enum { + OptCamera = 'c', + OptCapture = 'C', + OptDisplay = 'D', + OptFile = 'F', + OptHelp = 'h', + OptInfo = 'I', + OptList = 'l', + OptListProperties = 'p', + OptMonitor = 'm', + OptOrientation = 'o', + OptSDL = 'S', + OptStream = 's', + OptListControls = 256, + OptStrictFormats = 257, + OptMetadata = 258, + OptCaptureScript = 259, +}; diff --git a/spider-cam/libcamera/src/apps/cam/meson.build b/spider-cam/libcamera/src/apps/cam/meson.build new file mode 100644 index 0000000..c70ca3c --- /dev/null +++ b/spider-cam/libcamera/src/apps/cam/meson.build @@ -0,0 +1,62 @@ +# SPDX-License-Identifier: CC0-1.0 + +if opt_cam.disabled() or not libevent.found() + cam_enabled = false + subdir_done() +endif + +cam_enabled = true + +cam_sources = files([ + 'camera_session.cpp', + 'capture_script.cpp', + 'file_sink.cpp', + 'frame_sink.cpp', + 'main.cpp', +]) + +cam_cpp_args = [apps_cpp_args] + +libdrm = dependency('libdrm', required : false) +libjpeg = dependency('libjpeg', required : false) +libsdl2 = dependency('SDL2', required : false) + +if libdrm.found() + cam_cpp_args += [ '-DHAVE_KMS' ] + cam_sources += files([ + 'drm.cpp', + 'kms_sink.cpp' + ]) +endif + +if libsdl2.found() + cam_cpp_args += ['-DHAVE_SDL'] + cam_sources += files([ + 'sdl_sink.cpp', + 'sdl_texture.cpp', + 'sdl_texture_yuv.cpp', + ]) + + if libjpeg.found() + cam_cpp_args += ['-DHAVE_LIBJPEG'] + cam_sources += files([ + 'sdl_texture_mjpg.cpp' + ]) + endif +endif + +cam = executable('cam', cam_sources, + link_with : apps_lib, + dependencies : [ + libatomic, + libcamera_public, + libdrm, + libevent, + libjpeg, + libsdl2, + libtiff, + libyaml, + ], + cpp_args : cam_cpp_args, + install : true, + install_tag : 'bin') diff --git a/spider-cam/libcamera/src/apps/cam/sdl_sink.cpp b/spider-cam/libcamera/src/apps/cam/sdl_sink.cpp new file mode 100644 index 0000000..8355dd5 --- /dev/null +++ b/spider-cam/libcamera/src/apps/cam/sdl_sink.cpp @@ -0,0 +1,215 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2022, Ideas on Board Oy + * + * SDL Sink + */ + +#include "sdl_sink.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "../common/event_loop.h" +#include "../common/image.h" + +#ifdef HAVE_LIBJPEG +#include "sdl_texture_mjpg.h" +#endif +#include "sdl_texture_yuv.h" + +using namespace libcamera; + +using namespace std::chrono_literals; + +SDLSink::SDLSink() + : window_(nullptr), renderer_(nullptr), rect_({}), + init_(false) +{ +} + +SDLSink::~SDLSink() +{ + stop(); +} + +int SDLSink::configure(const libcamera::CameraConfiguration &config) +{ + int ret = FrameSink::configure(config); + if (ret < 0) + return ret; + + if (config.size() > 1) { + std::cerr + << "SDL sink only supports one camera stream at present, streaming first camera stream" + << std::endl; + } else if (config.empty()) { + std::cerr << "Require at least one camera stream to process" + << std::endl; + return -EINVAL; + } + + const libcamera::StreamConfiguration &cfg = config.at(0); + rect_.w = cfg.size.width; + rect_.h = cfg.size.height; + + switch (cfg.pixelFormat) { +#ifdef HAVE_LIBJPEG + case libcamera::formats::MJPEG: + texture_ = std::make_unique(rect_); + break; +#endif +#if SDL_VERSION_ATLEAST(2, 0, 16) + case libcamera::formats::NV12: + texture_ = std::make_unique(rect_, cfg.stride); + break; +#endif + case libcamera::formats::YUYV: + texture_ = std::make_unique(rect_, cfg.stride); + break; + default: + std::cerr << "Unsupported pixel 
format " + << cfg.pixelFormat.toString() << std::endl; + return -EINVAL; + }; + + return 0; +} + +int SDLSink::start() +{ + int ret = SDL_Init(SDL_INIT_VIDEO); + if (ret) { + std::cerr << "Failed to initialize SDL: " << SDL_GetError() + << std::endl; + return ret; + } + + init_ = true; + window_ = SDL_CreateWindow("", SDL_WINDOWPOS_UNDEFINED, + SDL_WINDOWPOS_UNDEFINED, rect_.w, + rect_.h, + SDL_WINDOW_SHOWN | SDL_WINDOW_RESIZABLE); + if (!window_) { + std::cerr << "Failed to create SDL window: " << SDL_GetError() + << std::endl; + return -EINVAL; + } + + renderer_ = SDL_CreateRenderer(window_, -1, 0); + if (!renderer_) { + std::cerr << "Failed to create SDL renderer: " << SDL_GetError() + << std::endl; + return -EINVAL; + } + + /* + * Set for scaling purposes, not critical, don't return in case of + * error. + */ + ret = SDL_RenderSetLogicalSize(renderer_, rect_.w, rect_.h); + if (ret) + std::cerr << "Failed to set SDL render logical size: " + << SDL_GetError() << std::endl; + + ret = texture_->create(renderer_); + if (ret) { + return ret; + } + + /* \todo Make the event cancellable to support stop/start cycles. */ + EventLoop::instance()->addTimerEvent( + 10ms, std::bind(&SDLSink::processSDLEvents, this)); + + return 0; +} + +int SDLSink::stop() +{ + texture_.reset(); + + if (renderer_) { + SDL_DestroyRenderer(renderer_); + renderer_ = nullptr; + } + + if (window_) { + SDL_DestroyWindow(window_); + window_ = nullptr; + } + + if (init_) { + SDL_Quit(); + init_ = false; + } + + return FrameSink::stop(); +} + +void SDLSink::mapBuffer(FrameBuffer *buffer) +{ + std::unique_ptr image = + Image::fromFrameBuffer(buffer, Image::MapMode::ReadOnly); + assert(image != nullptr); + + mappedBuffers_[buffer] = std::move(image); +} + +bool SDLSink::processRequest(Request *request) +{ + for (auto [stream, buffer] : request->buffers()) { + renderBuffer(buffer); + break; /* to be expanded to launch SDL window per buffer */ + } + + return true; +} + +/* + * Process SDL events, required for things like window resize and quit button + */ +void SDLSink::processSDLEvents() +{ + for (SDL_Event e; SDL_PollEvent(&e);) { + if (e.type == SDL_QUIT) { + /* Click close icon then quit */ + EventLoop::instance()->exit(0); + } + } +} + +void SDLSink::renderBuffer(FrameBuffer *buffer) +{ + Image *image = mappedBuffers_[buffer].get(); + + std::vector> planes; + unsigned int i = 0; + + planes.reserve(buffer->metadata().planes().size()); + + for (const FrameMetadata::Plane &meta : buffer->metadata().planes()) { + Span data = image->data(i); + if (meta.bytesused > data.size()) + std::cerr << "payload size " << meta.bytesused + << " larger than plane size " << data.size() + << std::endl; + + planes.push_back(data); + i++; + } + + texture_->update(planes); + + SDL_RenderClear(renderer_); + SDL_RenderCopy(renderer_, texture_->get(), nullptr, nullptr); + SDL_RenderPresent(renderer_); +} diff --git a/spider-cam/libcamera/src/apps/cam/sdl_sink.h b/spider-cam/libcamera/src/apps/cam/sdl_sink.h new file mode 100644 index 0000000..18ec7fb --- /dev/null +++ b/spider-cam/libcamera/src/apps/cam/sdl_sink.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2022, Ideas on Board Oy + * + * SDL Sink + */ + +#pragma once + +#include +#include + +#include + +#include + +#include "frame_sink.h" + +class Image; +class SDLTexture; + +class SDLSink : public FrameSink +{ +public: + SDLSink(); + ~SDLSink(); + + int configure(const libcamera::CameraConfiguration &config) override; + int start() override; + int stop() 
override;
+	void mapBuffer(libcamera::FrameBuffer *buffer) override;
+
+	bool processRequest(libcamera::Request *request) override;
+
+private:
+	void renderBuffer(libcamera::FrameBuffer *buffer);
+	void processSDLEvents();
+
+	std::map<libcamera::FrameBuffer *, std::unique_ptr<Image>>
+		mappedBuffers_;
+
+	std::unique_ptr<SDLTexture> texture_;
+
+	SDL_Window *window_;
+	SDL_Renderer *renderer_;
+	SDL_Rect rect_;
+	bool init_;
+};
diff --git a/spider-cam/libcamera/src/apps/cam/sdl_texture.cpp b/spider-cam/libcamera/src/apps/cam/sdl_texture.cpp
new file mode 100644
index 0000000..e52c4a3
--- /dev/null
+++ b/spider-cam/libcamera/src/apps/cam/sdl_texture.cpp
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Ideas on Board Oy
+ *
+ * SDL Texture
+ */
+
+#include "sdl_texture.h"
+
+#include <iostream>
+
+SDLTexture::SDLTexture(const SDL_Rect &rect, uint32_t pixelFormat,
+		       const int stride)
+	: ptr_(nullptr), rect_(rect), pixelFormat_(pixelFormat), stride_(stride)
+{
+}
+
+SDLTexture::~SDLTexture()
+{
+	if (ptr_)
+		SDL_DestroyTexture(ptr_);
+}
+
+int SDLTexture::create(SDL_Renderer *renderer)
+{
+	ptr_ = SDL_CreateTexture(renderer, pixelFormat_,
+				 SDL_TEXTUREACCESS_STREAMING, rect_.w,
+				 rect_.h);
+	if (!ptr_) {
+		std::cerr << "Failed to create SDL texture: " << SDL_GetError()
+			  << std::endl;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
diff --git a/spider-cam/libcamera/src/apps/cam/sdl_texture.h b/spider-cam/libcamera/src/apps/cam/sdl_texture.h
new file mode 100644
index 0000000..990f83b
--- /dev/null
+++ b/spider-cam/libcamera/src/apps/cam/sdl_texture.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Ideas on Board Oy
+ *
+ * SDL Texture
+ */
+
+#pragma once
+
+#include <vector>
+
+#include <SDL2/SDL.h>
+
+#include "../common/image.h"
+
+class SDLTexture
+{
+public:
+	SDLTexture(const SDL_Rect &rect, uint32_t pixelFormat, const int stride);
+	virtual ~SDLTexture();
+	int create(SDL_Renderer *renderer);
+	virtual void update(const std::vector<libcamera::Span<const uint8_t>> &data) = 0;
+	SDL_Texture *get() const { return ptr_; }
+
+protected:
+	SDL_Texture *ptr_;
+	const SDL_Rect rect_;
+	const uint32_t pixelFormat_;
+	const int stride_;
+};
diff --git a/spider-cam/libcamera/src/apps/cam/sdl_texture_mjpg.cpp b/spider-cam/libcamera/src/apps/cam/sdl_texture_mjpg.cpp
new file mode 100644
index 0000000..cace18f
--- /dev/null
+++ b/spider-cam/libcamera/src/apps/cam/sdl_texture_mjpg.cpp
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Ideas on Board Oy
+ *
+ * SDL Texture MJPG
+ */
+
+#include "sdl_texture_mjpg.h"
+
+#include <iostream>
+#include <setjmp.h>
+#include <stdio.h>
+
+#include <jpeglib.h>
+
+using namespace libcamera;
+
+struct JpegErrorManager : public jpeg_error_mgr {
+	JpegErrorManager()
+	{
+		jpeg_std_error(this);
+		error_exit = errorExit;
+		output_message = outputMessage;
+	}
+
+	static void errorExit(j_common_ptr cinfo)
+	{
+		JpegErrorManager *self =
+			static_cast<JpegErrorManager *>(cinfo->err);
+		longjmp(self->escape_, 1);
+	}
+
+	static void outputMessage([[maybe_unused]] j_common_ptr cinfo)
+	{
+	}
+
+	jmp_buf escape_;
+};
+
+SDLTextureMJPG::SDLTextureMJPG(const SDL_Rect &rect)
+	: SDLTexture(rect, SDL_PIXELFORMAT_RGB24, rect.w * 3),
+	  rgb_(std::make_unique<unsigned char[]>(stride_ * rect.h))
+{
+}
+
+int SDLTextureMJPG::decompress(Span<const uint8_t> data)
+{
+	struct jpeg_decompress_struct cinfo;
+
+	JpegErrorManager errorManager;
+	if (setjmp(errorManager.escape_)) {
+		/* libjpeg found an error */
+		jpeg_destroy_decompress(&cinfo);
+		std::cerr << "JPEG decompression error" << std::endl;
+		return -EINVAL;
+	}
+
+	cinfo.err = &errorManager;
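+	/*
+	 * From this point on any libjpeg error calls errorExit(), which
+	 * longjmp()s back to the setjmp() above, so decompression failures
+	 * return -EINVAL instead of the library's default error handler
+	 * terminating the process.
+	 */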
jpeg_create_decompress(&cinfo); + + jpeg_mem_src(&cinfo, data.data(), data.size()); + + jpeg_read_header(&cinfo, TRUE); + + jpeg_start_decompress(&cinfo); + + for (int i = 0; cinfo.output_scanline < cinfo.output_height; ++i) { + JSAMPROW rowptr = rgb_.get() + i * stride_; + jpeg_read_scanlines(&cinfo, &rowptr, 1); + } + + jpeg_finish_decompress(&cinfo); + + jpeg_destroy_decompress(&cinfo); + + return 0; +} + +void SDLTextureMJPG::update(const std::vector> &data) +{ + decompress(data[0]); + SDL_UpdateTexture(ptr_, nullptr, rgb_.get(), stride_); +} diff --git a/spider-cam/libcamera/src/apps/cam/sdl_texture_mjpg.h b/spider-cam/libcamera/src/apps/cam/sdl_texture_mjpg.h new file mode 100644 index 0000000..37bed5f --- /dev/null +++ b/spider-cam/libcamera/src/apps/cam/sdl_texture_mjpg.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2022, Ideas on Board Oy + * + * SDL Texture MJPG + */ + +#pragma once + +#include "sdl_texture.h" + +class SDLTextureMJPG : public SDLTexture +{ +public: + SDLTextureMJPG(const SDL_Rect &rect); + + void update(const std::vector> &data) override; + +private: + int decompress(libcamera::Span data); + + std::unique_ptr rgb_; +}; diff --git a/spider-cam/libcamera/src/apps/cam/sdl_texture_yuv.cpp b/spider-cam/libcamera/src/apps/cam/sdl_texture_yuv.cpp new file mode 100644 index 0000000..480d7a3 --- /dev/null +++ b/spider-cam/libcamera/src/apps/cam/sdl_texture_yuv.cpp @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2022, Ideas on Board Oy + * + * SDL YUV Textures + */ + +#include "sdl_texture_yuv.h" + +using namespace libcamera; + +#if SDL_VERSION_ATLEAST(2, 0, 16) +SDLTextureNV12::SDLTextureNV12(const SDL_Rect &rect, unsigned int stride) + : SDLTexture(rect, SDL_PIXELFORMAT_NV12, stride) +{ +} + +void SDLTextureNV12::update(const std::vector> &data) +{ + SDL_UpdateNVTexture(ptr_, &rect_, data[0].data(), stride_, + data[1].data(), stride_); +} +#endif + +SDLTextureYUYV::SDLTextureYUYV(const SDL_Rect &rect, unsigned int stride) + : SDLTexture(rect, SDL_PIXELFORMAT_YUY2, stride) +{ +} + +void SDLTextureYUYV::update(const std::vector> &data) +{ + SDL_UpdateTexture(ptr_, &rect_, data[0].data(), stride_); +} diff --git a/spider-cam/libcamera/src/apps/cam/sdl_texture_yuv.h b/spider-cam/libcamera/src/apps/cam/sdl_texture_yuv.h new file mode 100644 index 0000000..29c756e --- /dev/null +++ b/spider-cam/libcamera/src/apps/cam/sdl_texture_yuv.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2022, Ideas on Board Oy + * + * SDL YUV Textures + */ + +#pragma once + +#include "sdl_texture.h" + +#if SDL_VERSION_ATLEAST(2, 0, 16) +class SDLTextureNV12 : public SDLTexture +{ +public: + SDLTextureNV12(const SDL_Rect &rect, unsigned int stride); + void update(const std::vector> &data) override; +}; +#endif + +class SDLTextureYUYV : public SDLTexture +{ +public: + SDLTextureYUYV(const SDL_Rect &rect, unsigned int stride); + void update(const std::vector> &data) override; +}; diff --git a/spider-cam/libcamera/src/apps/common/dng_writer.cpp b/spider-cam/libcamera/src/apps/common/dng_writer.cpp new file mode 100644 index 0000000..355433b --- /dev/null +++ b/spider-cam/libcamera/src/apps/common/dng_writer.cpp @@ -0,0 +1,808 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Raspberry Pi Ltd + * + * DNG writer + */ + +#include "dng_writer.h" + +#include +#include +#include +#include + +#include + +#include +#include +#include + +using namespace 
libcamera; + +enum CFAPatternColour : uint8_t { + CFAPatternRed = 0, + CFAPatternGreen = 1, + CFAPatternBlue = 2, +}; + +struct FormatInfo { + uint8_t bitsPerSample; + CFAPatternColour pattern[4]; + void (*packScanline)(void *output, const void *input, + unsigned int width); + void (*thumbScanline)(const FormatInfo &info, void *output, + const void *input, unsigned int width, + unsigned int stride); +}; + +struct Matrix3d { + Matrix3d() + { + } + + Matrix3d(float m0, float m1, float m2, + float m3, float m4, float m5, + float m6, float m7, float m8) + { + m[0] = m0, m[1] = m1, m[2] = m2; + m[3] = m3, m[4] = m4, m[5] = m5; + m[6] = m6, m[7] = m7, m[8] = m8; + } + + Matrix3d(const Span &span) + : Matrix3d(span[0], span[1], span[2], + span[3], span[4], span[5], + span[6], span[7], span[8]) + { + } + + static Matrix3d diag(float diag0, float diag1, float diag2) + { + return Matrix3d(diag0, 0, 0, 0, diag1, 0, 0, 0, diag2); + } + + static Matrix3d identity() + { + return Matrix3d(1, 0, 0, 0, 1, 0, 0, 0, 1); + } + + Matrix3d transpose() const + { + return { m[0], m[3], m[6], m[1], m[4], m[7], m[2], m[5], m[8] }; + } + + Matrix3d cofactors() const + { + return { m[4] * m[8] - m[5] * m[7], + -(m[3] * m[8] - m[5] * m[6]), + m[3] * m[7] - m[4] * m[6], + -(m[1] * m[8] - m[2] * m[7]), + m[0] * m[8] - m[2] * m[6], + -(m[0] * m[7] - m[1] * m[6]), + m[1] * m[5] - m[2] * m[4], + -(m[0] * m[5] - m[2] * m[3]), + m[0] * m[4] - m[1] * m[3] }; + } + + Matrix3d adjugate() const + { + return cofactors().transpose(); + } + + float determinant() const + { + return m[0] * (m[4] * m[8] - m[5] * m[7]) - + m[1] * (m[3] * m[8] - m[5] * m[6]) + + m[2] * (m[3] * m[7] - m[4] * m[6]); + } + + Matrix3d inverse() const + { + return adjugate() * (1.0 / determinant()); + } + + Matrix3d operator*(const Matrix3d &other) const + { + Matrix3d result; + for (unsigned int i = 0; i < 3; i++) { + for (unsigned int j = 0; j < 3; j++) { + result.m[i * 3 + j] = + m[i * 3 + 0] * other.m[0 + j] + + m[i * 3 + 1] * other.m[3 + j] + + m[i * 3 + 2] * other.m[6 + j]; + } + } + return result; + } + + Matrix3d operator*(float f) const + { + Matrix3d result; + for (unsigned int i = 0; i < 9; i++) + result.m[i] = m[i] * f; + return result; + } + + float m[9]; +}; + +namespace { + +void packScanlineRaw8(void *output, const void *input, unsigned int width) +{ + const uint8_t *in = static_cast(input); + uint8_t *out = static_cast(output); + + std::copy(in, in + width, out); +} + +void packScanlineRaw10(void *output, const void *input, unsigned int width) +{ + const uint8_t *in = static_cast(input); + uint8_t *out = static_cast(output); + + for (unsigned int i = 0; i < width; i += 4) { + *out++ = in[1] << 6 | in[0] >> 2; + *out++ = in[0] << 6 | (in[3] & 0x03) << 4 | in[2] >> 4; + *out++ = in[2] << 4 | (in[5] & 0x03) << 2 | in[4] >> 6; + *out++ = in[4] << 2 | (in[7] & 0x03) << 0; + *out++ = in[6]; + in += 8; + } +} + +void packScanlineRaw12(void *output, const void *input, unsigned int width) +{ + const uint8_t *in = static_cast(input); + uint8_t *out = static_cast(output); + + for (unsigned int i = 0; i < width; i += 2) { + *out++ = in[1] << 4 | in[0] >> 4; + *out++ = in[0] << 4 | (in[3] & 0x0f); + *out++ = in[2]; + in += 4; + } +} + +void packScanlineRaw16(void *output, const void *input, unsigned int width) +{ + const uint16_t *in = static_cast(input); + uint16_t *out = static_cast(output); + + std::copy(in, in + width, out); +} + +/* Thumbnail function for raw data with each pixel aligned to 16bit. 
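 * The thumbnail is 1/16th of the raw image in each dimension: every output
 * pixel averages a 2x2 group of 16-bit samples, shifts the result down to
 * 8 bits and replicates it to R, G and B.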
*/ +void thumbScanlineRaw(const FormatInfo &info, void *output, const void *input, + unsigned int width, unsigned int stride) +{ + const uint16_t *in = static_cast(input); + const uint16_t *in2 = static_cast(input) + stride / 2; + uint8_t *out = static_cast(output); + + /* Shift down to 8. */ + unsigned int shift = info.bitsPerSample - 8; + + /* Simple averaging that produces greyscale RGB values. */ + for (unsigned int x = 0; x < width; x++) { + uint16_t value = (le16toh(in[0]) + le16toh(in[1]) + + le16toh(in2[0]) + le16toh(in2[1])) >> 2; + value = value >> shift; + *out++ = value; + *out++ = value; + *out++ = value; + in += 16; + in2 += 16; + } +} + +void packScanlineRaw10_CSI2P(void *output, const void *input, unsigned int width) +{ + const uint8_t *in = static_cast(input); + uint8_t *out = static_cast(output); + + /* \todo Can this be made more efficient? */ + for (unsigned int x = 0; x < width; x += 4) { + *out++ = in[0]; + *out++ = (in[4] & 0x03) << 6 | in[1] >> 2; + *out++ = (in[1] & 0x03) << 6 | (in[4] & 0x0c) << 2 | in[2] >> 4; + *out++ = (in[2] & 0x0f) << 4 | (in[4] & 0x30) >> 2 | in[3] >> 6; + *out++ = (in[3] & 0x3f) << 2 | (in[4] & 0xc0) >> 6; + in += 5; + } +} + +void packScanlineRaw12_CSI2P(void *output, const void *input, unsigned int width) +{ + const uint8_t *in = static_cast(input); + uint8_t *out = static_cast(output); + + /* \todo Can this be made more efficient? */ + for (unsigned int i = 0; i < width; i += 2) { + *out++ = in[0]; + *out++ = (in[2] & 0x0f) << 4 | in[1] >> 4; + *out++ = (in[1] & 0x0f) << 4 | in[2] >> 4; + in += 3; + } +} + +void thumbScanlineRaw_CSI2P(const FormatInfo &info, void *output, + const void *input, unsigned int width, + unsigned int stride) +{ + const uint8_t *in = static_cast(input); + uint8_t *out = static_cast(output); + + /* Number of bytes corresponding to 16 pixels. */ + unsigned int skip = info.bitsPerSample * 16 / 8; + + for (unsigned int x = 0; x < width; x++) { + uint8_t value = (in[0] + in[1] + in[stride] + in[stride + 1]) >> 2; + *out++ = value; + *out++ = value; + *out++ = value; + in += skip; + } +} + +void packScanlineIPU3(void *output, const void *input, unsigned int width) +{ + const uint8_t *in = static_cast(input); + uint16_t *out = static_cast(output); + + /* + * Upscale the 10-bit format to 16-bit as it's not trivial to pack it + * as 10-bit without gaps. + * + * \todo Improve packing to keep the 10-bit sample size. + */ + unsigned int x = 0; + while (true) { + for (unsigned int i = 0; i < 6; i++) { + *out++ = (in[1] & 0x03) << 14 | (in[0] & 0xff) << 6; + if (++x >= width) + return; + + *out++ = (in[2] & 0x0f) << 12 | (in[1] & 0xfc) << 4; + if (++x >= width) + return; + + *out++ = (in[3] & 0x3f) << 10 | (in[2] & 0xf0) << 2; + if (++x >= width) + return; + + *out++ = (in[4] & 0xff) << 8 | (in[3] & 0xc0) << 0; + if (++x >= width) + return; + + in += 5; + } + + *out++ = (in[1] & 0x03) << 14 | (in[0] & 0xff) << 6; + if (++x >= width) + return; + + in += 2; + } +} + +void thumbScanlineIPU3([[maybe_unused]] const FormatInfo &info, void *output, + const void *input, unsigned int width, + unsigned int stride) +{ + uint8_t *out = static_cast(output); + + for (unsigned int x = 0; x < width; x++) { + unsigned int pixel = x * 16; + unsigned int block = pixel / 25; + unsigned int pixelInBlock = pixel - block * 25; + + /* + * If the pixel is the last in the block cheat a little and + * move one pixel backward to avoid reading between two blocks + * and having to deal with the padding bits. 
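+		 * Each 32-byte IPU3 block packs 25 10-bit pixels, so pixel 24
+		 * is the last valid pixel before the padding bits at the end
+		 * of the block.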
+ */ + if (pixelInBlock == 24) + pixelInBlock--; + + const uint8_t *in = static_cast(input) + + block * 32 + (pixelInBlock / 4) * 5; + + uint16_t val1, val2, val3, val4; + switch (pixelInBlock % 4) { + default: + case 0: + val1 = (in[1] & 0x03) << 14 | (in[0] & 0xff) << 6; + val2 = (in[2] & 0x0f) << 12 | (in[1] & 0xfc) << 4; + val3 = (in[stride + 1] & 0x03) << 14 | (in[stride + 0] & 0xff) << 6; + val4 = (in[stride + 2] & 0x0f) << 12 | (in[stride + 1] & 0xfc) << 4; + break; + case 1: + val1 = (in[2] & 0x0f) << 12 | (in[1] & 0xfc) << 4; + val2 = (in[3] & 0x3f) << 10 | (in[2] & 0xf0) << 2; + val3 = (in[stride + 2] & 0x0f) << 12 | (in[stride + 1] & 0xfc) << 4; + val4 = (in[stride + 3] & 0x3f) << 10 | (in[stride + 2] & 0xf0) << 2; + break; + case 2: + val1 = (in[3] & 0x3f) << 10 | (in[2] & 0xf0) << 2; + val2 = (in[4] & 0xff) << 8 | (in[3] & 0xc0) << 0; + val3 = (in[stride + 3] & 0x3f) << 10 | (in[stride + 2] & 0xf0) << 2; + val4 = (in[stride + 4] & 0xff) << 8 | (in[stride + 3] & 0xc0) << 0; + break; + case 3: + val1 = (in[4] & 0xff) << 8 | (in[3] & 0xc0) << 0; + val2 = (in[6] & 0x03) << 14 | (in[5] & 0xff) << 6; + val3 = (in[stride + 4] & 0xff) << 8 | (in[stride + 3] & 0xc0) << 0; + val4 = (in[stride + 6] & 0x03) << 14 | (in[stride + 5] & 0xff) << 6; + break; + } + + uint8_t value = (val1 + val2 + val3 + val4) >> 10; + *out++ = value; + *out++ = value; + *out++ = value; + } +} + +const std::map formatInfo = { + { formats::SBGGR8, { + .bitsPerSample = 8, + .pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed }, + .packScanline = packScanlineRaw8, + .thumbScanline = thumbScanlineRaw_CSI2P, + } }, + { formats::SGBRG8, { + .bitsPerSample = 8, + .pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen }, + .packScanline = packScanlineRaw8, + .thumbScanline = thumbScanlineRaw_CSI2P, + } }, + { formats::SGRBG8, { + .bitsPerSample = 8, + .pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen }, + .packScanline = packScanlineRaw8, + .thumbScanline = thumbScanlineRaw_CSI2P, + } }, + { formats::SRGGB8, { + .bitsPerSample = 8, + .pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue }, + .packScanline = packScanlineRaw8, + .thumbScanline = thumbScanlineRaw_CSI2P, + } }, + { formats::SBGGR10, { + .bitsPerSample = 10, + .pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed }, + .packScanline = packScanlineRaw10, + .thumbScanline = thumbScanlineRaw, + } }, + { formats::SGBRG10, { + .bitsPerSample = 10, + .pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen }, + .packScanline = packScanlineRaw10, + .thumbScanline = thumbScanlineRaw, + } }, + { formats::SGRBG10, { + .bitsPerSample = 10, + .pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen }, + .packScanline = packScanlineRaw10, + .thumbScanline = thumbScanlineRaw, + } }, + { formats::SRGGB10, { + .bitsPerSample = 10, + .pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue }, + .packScanline = packScanlineRaw10, + .thumbScanline = thumbScanlineRaw, + } }, + { formats::SBGGR12, { + .bitsPerSample = 12, + .pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed }, + .packScanline = packScanlineRaw12, + .thumbScanline = thumbScanlineRaw, + } }, + { formats::SGBRG12, { + .bitsPerSample = 12, + .pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen }, + .packScanline = packScanlineRaw12, + .thumbScanline = thumbScanlineRaw, + } }, + { 
formats::SGRBG12, { + .bitsPerSample = 12, + .pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen }, + .packScanline = packScanlineRaw12, + .thumbScanline = thumbScanlineRaw, + } }, + { formats::SRGGB12, { + .bitsPerSample = 12, + .pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue }, + .packScanline = packScanlineRaw12, + .thumbScanline = thumbScanlineRaw, + } }, + { formats::SBGGR16, { + .bitsPerSample = 16, + .pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed }, + .packScanline = packScanlineRaw16, + .thumbScanline = thumbScanlineRaw, + } }, + { formats::SGBRG16, { + .bitsPerSample = 16, + .pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen }, + .packScanline = packScanlineRaw16, + .thumbScanline = thumbScanlineRaw, + } }, + { formats::SGRBG16, { + .bitsPerSample = 16, + .pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen }, + .packScanline = packScanlineRaw16, + .thumbScanline = thumbScanlineRaw, + } }, + { formats::SRGGB16, { + .bitsPerSample = 16, + .pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue }, + .packScanline = packScanlineRaw16, + .thumbScanline = thumbScanlineRaw, + } }, + { formats::SBGGR10_CSI2P, { + .bitsPerSample = 10, + .pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed }, + .packScanline = packScanlineRaw10_CSI2P, + .thumbScanline = thumbScanlineRaw_CSI2P, + } }, + { formats::SGBRG10_CSI2P, { + .bitsPerSample = 10, + .pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen }, + .packScanline = packScanlineRaw10_CSI2P, + .thumbScanline = thumbScanlineRaw_CSI2P, + } }, + { formats::SGRBG10_CSI2P, { + .bitsPerSample = 10, + .pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen }, + .packScanline = packScanlineRaw10_CSI2P, + .thumbScanline = thumbScanlineRaw_CSI2P, + } }, + { formats::SRGGB10_CSI2P, { + .bitsPerSample = 10, + .pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue }, + .packScanline = packScanlineRaw10_CSI2P, + .thumbScanline = thumbScanlineRaw_CSI2P, + } }, + { formats::SBGGR12_CSI2P, { + .bitsPerSample = 12, + .pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed }, + .packScanline = packScanlineRaw12_CSI2P, + .thumbScanline = thumbScanlineRaw_CSI2P, + } }, + { formats::SGBRG12_CSI2P, { + .bitsPerSample = 12, + .pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen }, + .packScanline = packScanlineRaw12_CSI2P, + .thumbScanline = thumbScanlineRaw_CSI2P, + } }, + { formats::SGRBG12_CSI2P, { + .bitsPerSample = 12, + .pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen }, + .packScanline = packScanlineRaw12_CSI2P, + .thumbScanline = thumbScanlineRaw_CSI2P, + } }, + { formats::SRGGB12_CSI2P, { + .bitsPerSample = 12, + .pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue }, + .packScanline = packScanlineRaw12_CSI2P, + .thumbScanline = thumbScanlineRaw_CSI2P, + } }, + { formats::SBGGR10_IPU3, { + .bitsPerSample = 16, + .pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed }, + .packScanline = packScanlineIPU3, + .thumbScanline = thumbScanlineIPU3, + } }, + { formats::SGBRG10_IPU3, { + .bitsPerSample = 16, + .pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen }, + .packScanline = packScanlineIPU3, + .thumbScanline = thumbScanlineIPU3, + } }, + { formats::SGRBG10_IPU3, { + 
.bitsPerSample = 16, + .pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen }, + .packScanline = packScanlineIPU3, + .thumbScanline = thumbScanlineIPU3, + } }, + { formats::SRGGB10_IPU3, { + .bitsPerSample = 16, + .pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue }, + .packScanline = packScanlineIPU3, + .thumbScanline = thumbScanlineIPU3, + } }, +}; + +} /* namespace */ + +int DNGWriter::write(const char *filename, const Camera *camera, + const StreamConfiguration &config, + const ControlList &metadata, + [[maybe_unused]] const FrameBuffer *buffer, + const void *data) +{ + const ControlList &cameraProperties = camera->properties(); + + const auto it = formatInfo.find(config.pixelFormat); + if (it == formatInfo.cend()) { + std::cerr << "Unsupported pixel format" << std::endl; + return -EINVAL; + } + const FormatInfo *info = &it->second; + + TIFF *tif = TIFFOpen(filename, "w"); + if (!tif) { + std::cerr << "Failed to open tiff file" << std::endl; + return -EINVAL; + } + + /* + * Scanline buffer, has to be large enough to store both a RAW scanline + * or a thumbnail scanline. The latter will always be much smaller than + * the former as we downscale by 16 in both directions. + */ + uint8_t scanline[(config.size.width * info->bitsPerSample + 7) / 8]; + + toff_t rawIFDOffset = 0; + toff_t exifIFDOffset = 0; + + /* + * Start with a thumbnail in IFD 0 for compatibility with TIFF baseline + * readers, as required by the TIFF/EP specification. Tags that apply to + * the whole file are stored here. + */ + const uint8_t version[] = { 1, 2, 0, 0 }; + + TIFFSetField(tif, TIFFTAG_DNGVERSION, version); + TIFFSetField(tif, TIFFTAG_DNGBACKWARDVERSION, version); + TIFFSetField(tif, TIFFTAG_FILLORDER, FILLORDER_MSB2LSB); + TIFFSetField(tif, TIFFTAG_MAKE, "libcamera"); + + const auto &model = cameraProperties.get(properties::Model); + if (model) { + TIFFSetField(tif, TIFFTAG_MODEL, model->c_str()); + /* \todo set TIFFTAG_UNIQUECAMERAMODEL. */ + } + + TIFFSetField(tif, TIFFTAG_SOFTWARE, "qcam"); + TIFFSetField(tif, TIFFTAG_ORIENTATION, ORIENTATION_TOPLEFT); + + /* + * Thumbnail-specific tags. The thumbnail is stored as an RGB image + * with 1/16 of the raw image resolution. Greyscale would save space, + * but doesn't seem well supported by RawTherapee. + */ + TIFFSetField(tif, TIFFTAG_SUBFILETYPE, FILETYPE_REDUCEDIMAGE); + TIFFSetField(tif, TIFFTAG_IMAGEWIDTH, config.size.width / 16); + TIFFSetField(tif, TIFFTAG_IMAGELENGTH, config.size.height / 16); + TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, 8); + TIFFSetField(tif, TIFFTAG_COMPRESSION, COMPRESSION_NONE); + TIFFSetField(tif, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_RGB); + TIFFSetField(tif, TIFFTAG_SAMPLESPERPIXEL, 3); + TIFFSetField(tif, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG); + TIFFSetField(tif, TIFFTAG_SAMPLEFORMAT, SAMPLEFORMAT_UINT); + + /* + * Fill in some reasonable colour information in the DNG. We supply + * the "neutral" colour values which determine the white balance, and the + * "ColorMatrix1" which converts XYZ to (un-white-balanced) camera RGB. + * Note that this is not a "proper" colour calibration for the DNG, + * nonetheless, many tools should be able to render the colours better. 
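+	 * DNG defines ColorMatrix1 as the transform from XYZ to camera RGB
+	 * before white balancing, so it is computed below as the inverse of
+	 * the forward product rgb2xyz * ccm * wbGain.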
+ */ + float neutral[3] = { 1, 1, 1 }; + Matrix3d wbGain = Matrix3d::identity(); + /* From http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html */ + const Matrix3d rgb2xyz(0.4124564, 0.3575761, 0.1804375, + 0.2126729, 0.7151522, 0.0721750, + 0.0193339, 0.1191920, 0.9503041); + Matrix3d ccm = Matrix3d::identity(); + /* + * Pick a reasonable number eps to protect against singularities. It + * should be comfortably larger than the point at which we run into + * numerical trouble, yet smaller than any plausible gain that we might + * apply to a colour, either explicitly or as part of the colour matrix. + */ + const double eps = 1e-2; + + const auto &colourGains = metadata.get(controls::ColourGains); + if (colourGains) { + if ((*colourGains)[0] > eps && (*colourGains)[1] > eps) { + wbGain = Matrix3d::diag((*colourGains)[0], 1, (*colourGains)[1]); + neutral[0] = 1.0 / (*colourGains)[0]; /* red */ + neutral[2] = 1.0 / (*colourGains)[1]; /* blue */ + } + } + + const auto &ccmControl = metadata.get(controls::ColourCorrectionMatrix); + if (ccmControl) { + Matrix3d ccmSupplied(*ccmControl); + if (ccmSupplied.determinant() > eps) + ccm = ccmSupplied; + } + + /* + * rgb2xyz is known to be invertible, and we've ensured above that both + * the ccm and wbGain matrices are non-singular, so the product of all + * three is guaranteed to be invertible too. + */ + Matrix3d colorMatrix1 = (rgb2xyz * ccm * wbGain).inverse(); + + TIFFSetField(tif, TIFFTAG_COLORMATRIX1, 9, colorMatrix1.m); + TIFFSetField(tif, TIFFTAG_ASSHOTNEUTRAL, 3, neutral); + + /* + * Reserve space for the SubIFD and ExifIFD tags, pointing to the IFD + * for the raw image and EXIF data respectively. The real offsets will + * be set later. + */ + TIFFSetField(tif, TIFFTAG_SUBIFD, 1, &rawIFDOffset); + TIFFSetField(tif, TIFFTAG_EXIFIFD, exifIFDOffset); + + /* Write the thumbnail. */ + const uint8_t *row = static_cast(data); + for (unsigned int y = 0; y < config.size.height / 16; y++) { + info->thumbScanline(*info, &scanline, row, + config.size.width / 16, config.stride); + + if (TIFFWriteScanline(tif, &scanline, y, 0) != 1) { + std::cerr << "Failed to write thumbnail scanline" + << std::endl; + TIFFClose(tif); + return -EINVAL; + } + + row += config.stride * 16; + } + + TIFFWriteDirectory(tif); + + /* + * Workaround for a bug introduced in libtiff version 4.5.1 and no fix + * released. In these versions the CFA* tags were missing in the field + * info. + * Introduced by: https://gitlab.com/libtiff/libtiff/-/commit/738e04099b13192bb1f654e74e9b5829313f3161 + * Fixed by: https://gitlab.com/libtiff/libtiff/-/commit/49856998c3d82e65444b47bb4fb11b7830a0c2be + */ + if (!TIFFFindField(tif, TIFFTAG_CFAREPEATPATTERNDIM, TIFF_ANY)) { + static const TIFFFieldInfo infos[] = { + { TIFFTAG_CFAREPEATPATTERNDIM, 2, 2, TIFF_SHORT, FIELD_CUSTOM, + 1, 0, const_cast("CFARepeatPatternDim") }, + { TIFFTAG_CFAPATTERN, -1, -1, TIFF_BYTE, FIELD_CUSTOM, + 1, 1, const_cast("CFAPattern") }, + }; + TIFFMergeFieldInfo(tif, infos, 2); + } + + /* Create a new IFD for the RAW image. 
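+	 * The raw image lives in a SubIFD of IFD 0; its offset is retrieved
+	 * with TIFFCheckpointDirectory() after writing and patched into the
+	 * SUBIFD tag of IFD 0 before the file is closed.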
*/ + const uint16_t cfaRepeatPatternDim[] = { 2, 2 }; + const uint8_t cfaPlaneColor[] = { + CFAPatternRed, + CFAPatternGreen, + CFAPatternBlue + }; + + TIFFSetField(tif, TIFFTAG_SUBFILETYPE, 0); + TIFFSetField(tif, TIFFTAG_IMAGEWIDTH, config.size.width); + TIFFSetField(tif, TIFFTAG_IMAGELENGTH, config.size.height); + TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, info->bitsPerSample); + TIFFSetField(tif, TIFFTAG_COMPRESSION, COMPRESSION_NONE); + TIFFSetField(tif, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_CFA); + TIFFSetField(tif, TIFFTAG_SAMPLESPERPIXEL, 1); + TIFFSetField(tif, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG); + TIFFSetField(tif, TIFFTAG_SAMPLEFORMAT, SAMPLEFORMAT_UINT); + TIFFSetField(tif, TIFFTAG_CFAREPEATPATTERNDIM, cfaRepeatPatternDim); + if (TIFFLIB_VERSION < 20201219) + TIFFSetField(tif, TIFFTAG_CFAPATTERN, info->pattern); + else + TIFFSetField(tif, TIFFTAG_CFAPATTERN, 4, info->pattern); + TIFFSetField(tif, TIFFTAG_CFAPLANECOLOR, 3, cfaPlaneColor); + TIFFSetField(tif, TIFFTAG_CFALAYOUT, 1); + + const uint16_t blackLevelRepeatDim[] = { 2, 2 }; + float blackLevel[] = { 0.0f, 0.0f, 0.0f, 0.0f }; + uint32_t whiteLevel = (1 << info->bitsPerSample) - 1; + + const auto &blackLevels = metadata.get(controls::SensorBlackLevels); + if (blackLevels) { + Span levels = *blackLevels; + + /* + * The black levels control is specified in R, Gr, Gb, B order. + * Map it to the TIFF tag that is specified in CFA pattern + * order. + */ + unsigned int green = (info->pattern[0] == CFAPatternRed || + info->pattern[1] == CFAPatternRed) + ? 0 : 1; + + for (unsigned int i = 0; i < 4; ++i) { + unsigned int level; + + switch (info->pattern[i]) { + case CFAPatternRed: + level = levels[0]; + break; + case CFAPatternGreen: + level = levels[green + 1]; + green = (green + 1) % 2; + break; + case CFAPatternBlue: + default: + level = levels[3]; + break; + } + + /* Map the 16-bit value to the bits per sample range. */ + blackLevel[i] = level >> (16 - info->bitsPerSample); + } + } + + TIFFSetField(tif, TIFFTAG_BLACKLEVELREPEATDIM, &blackLevelRepeatDim); + TIFFSetField(tif, TIFFTAG_BLACKLEVEL, 4, &blackLevel); + TIFFSetField(tif, TIFFTAG_WHITELEVEL, 1, &whiteLevel); + + /* Write RAW content. */ + row = static_cast(data); + for (unsigned int y = 0; y < config.size.height; y++) { + info->packScanline(&scanline, row, config.size.width); + + if (TIFFWriteScanline(tif, &scanline, y, 0) != 1) { + std::cerr << "Failed to write RAW scanline" + << std::endl; + TIFFClose(tif); + return -EINVAL; + } + + row += config.stride; + } + + /* Checkpoint the IFD to retrieve its offset, and write it out. */ + TIFFCheckpointDirectory(tif); + rawIFDOffset = TIFFCurrentDirOffset(tif); + TIFFWriteDirectory(tif); + + /* Create a new IFD for the EXIF data and fill it. */ + TIFFCreateEXIFDirectory(tif); + + /* Store creation time. */ + time_t rawtime; + struct tm *timeinfo; + char strTime[20]; + + time(&rawtime); + timeinfo = localtime(&rawtime); + strftime(strTime, 20, "%Y:%m:%d %H:%M:%S", timeinfo); + + /* + * \todo Handle timezone information by setting OffsetTimeOriginal and + * OffsetTimeDigitized once libtiff catches up to the specification and + * has EXIFTAG_ defines to handle them. 
+	TIFFSetField(tif, TIFFTAG_BLACKLEVELREPEATDIM, &blackLevelRepeatDim);
+	TIFFSetField(tif, TIFFTAG_BLACKLEVEL, 4, &blackLevel);
+	TIFFSetField(tif, TIFFTAG_WHITELEVEL, 1, &whiteLevel);
+
+	/* Write RAW content. */
+	row = static_cast<const uint8_t *>(data);
+	for (unsigned int y = 0; y < config.size.height; y++) {
+		info->packScanline(&scanline, row, config.size.width);
+
+		if (TIFFWriteScanline(tif, &scanline, y, 0) != 1) {
+			std::cerr << "Failed to write RAW scanline"
+				  << std::endl;
+			TIFFClose(tif);
+			return -EINVAL;
+		}
+
+		row += config.stride;
+	}
+
+	/* Checkpoint the IFD to retrieve its offset, and write it out. */
+	TIFFCheckpointDirectory(tif);
+	rawIFDOffset = TIFFCurrentDirOffset(tif);
+	TIFFWriteDirectory(tif);
+
+	/* Create a new IFD for the EXIF data and fill it. */
+	TIFFCreateEXIFDirectory(tif);
+
+	/* Store creation time. */
+	time_t rawtime;
+	struct tm *timeinfo;
+	char strTime[20];
+
+	time(&rawtime);
+	timeinfo = localtime(&rawtime);
+	strftime(strTime, 20, "%Y:%m:%d %H:%M:%S", timeinfo);
+
+	/*
+	 * \todo Handle timezone information by setting OffsetTimeOriginal and
+	 * OffsetTimeDigitized once libtiff catches up to the specification and
+	 * has EXIFTAG_ defines to handle them.
+	 */
+	TIFFSetField(tif, EXIFTAG_DATETIMEORIGINAL, strTime);
+	TIFFSetField(tif, EXIFTAG_DATETIMEDIGITIZED, strTime);
+
+	const auto &analogGain = metadata.get(controls::AnalogueGain);
+	if (analogGain) {
+		uint16_t iso = std::min(std::max(*analogGain * 100, 0.0f), 65535.0f);
+		TIFFSetField(tif, EXIFTAG_ISOSPEEDRATINGS, 1, &iso);
+	}
+
+	const auto &exposureTime = metadata.get(controls::ExposureTime);
+	if (exposureTime)
+		TIFFSetField(tif, EXIFTAG_EXPOSURETIME, *exposureTime / 1e6);
+
+	TIFFWriteCustomDirectory(tif, &exifIFDOffset);
+
+	/* Update the IFD offsets and close the file. */
+	TIFFSetDirectory(tif, 0);
+	TIFFSetField(tif, TIFFTAG_SUBIFD, 1, &rawIFDOffset);
+	TIFFSetField(tif, TIFFTAG_EXIFIFD, exifIFDOffset);
+	TIFFWriteDirectory(tif);
+
+	TIFFClose(tif);
+
+	return 0;
+}
diff --git a/spider-cam/libcamera/src/apps/common/dng_writer.h b/spider-cam/libcamera/src/apps/common/dng_writer.h
new file mode 100644
index 0000000..917713e
--- /dev/null
+++ b/spider-cam/libcamera/src/apps/common/dng_writer.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * DNG writer
+ */
+
+#pragma once
+
+#ifdef HAVE_TIFF
+#define HAVE_DNG
+
+#include <libcamera/camera.h>
+#include <libcamera/controls.h>
+#include <libcamera/framebuffer.h>
+#include <libcamera/stream.h>
+
+class DNGWriter
+{
+public:
+	static int write(const char *filename, const libcamera::Camera *camera,
+			 const libcamera::StreamConfiguration &config,
+			 const libcamera::ControlList &metadata,
+			 const libcamera::FrameBuffer *buffer, const void *data);
+};
+
+#endif /* HAVE_TIFF */
diff --git a/spider-cam/libcamera/src/apps/common/event_loop.cpp b/spider-cam/libcamera/src/apps/common/event_loop.cpp
new file mode 100644
index 0000000..f7f9afa
--- /dev/null
+++ b/spider-cam/libcamera/src/apps/common/event_loop.cpp
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * cam - Event loop
+ */
+
+#include "event_loop.h"
+
+#include <assert.h>
+#include <event2/event.h>
+#include <event2/thread.h>
+#include <iostream>
+
+EventLoop *EventLoop::instance_ = nullptr;
+
+EventLoop::EventLoop()
+{
+	assert(!instance_);
+
+	evthread_use_pthreads();
+	base_ = event_base_new();
+	instance_ = this;
+}
+
+EventLoop::~EventLoop()
+{
+	instance_ = nullptr;
+
+	events_.clear();
+	event_base_free(base_);
+	libevent_global_shutdown();
+}
+
+EventLoop *EventLoop::instance()
+{
+	return instance_;
+}
+
+int EventLoop::exec()
+{
+	exitCode_ = -1;
+	event_base_loop(base_, EVLOOP_NO_EXIT_ON_EMPTY);
+	return exitCode_;
+}
+
+void EventLoop::exit(int code)
+{
+	exitCode_ = code;
+	event_base_loopbreak(base_);
+}
+
+void EventLoop::callLater(const std::function<void()> &func)
+{
+	{
+		std::unique_lock<std::mutex> locker(lock_);
+		calls_.push_back(func);
+	}
+
+	event_base_once(base_, -1, EV_TIMEOUT, dispatchCallback, this, nullptr);
+}
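+/*
+ * callLater() is safe to invoke from any thread: the callback is queued
+ * under the lock, and event_base_once() schedules a one-shot event with no
+ * fd (-1), so dispatchCallback() pops and runs the queued function on the
+ * event loop thread.
+ */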
+void EventLoop::addFdEvent(int fd, EventType type,
+			   const std::function<void()> &callback)
+{
+	std::unique_ptr<Event> event = std::make_unique<Event>(callback);
+	short events = (type & Read ? EV_READ : 0)
+		     | (type & Write ? EV_WRITE : 0)
+		     | EV_PERSIST;
+
+	event->event_ = event_new(base_, fd, events, &EventLoop::Event::dispatch,
+				  event.get());
+	if (!event->event_) {
+		std::cerr << "Failed to create event for fd " << fd << std::endl;
+		return;
+	}
+
+	int ret = event_add(event->event_, nullptr);
+	if (ret < 0) {
+		std::cerr << "Failed to add event for fd " << fd << std::endl;
+		return;
+	}
+
+	events_.push_back(std::move(event));
+}
+
+void EventLoop::addTimerEvent(const std::chrono::microseconds period,
+			      const std::function<void()> &callback)
+{
+	std::unique_ptr<Event> event = std::make_unique<Event>(callback);
+	event->event_ = event_new(base_, -1, EV_PERSIST, &EventLoop::Event::dispatch,
+				  event.get());
+	if (!event->event_) {
+		std::cerr << "Failed to create timer event" << std::endl;
+		return;
+	}
+
+	struct timeval tv;
+	tv.tv_sec = period.count() / 1000000ULL;
+	tv.tv_usec = period.count() % 1000000ULL;
+
+	int ret = event_add(event->event_, &tv);
+	if (ret < 0) {
+		std::cerr << "Failed to add timer event" << std::endl;
+		return;
+	}
+
+	events_.push_back(std::move(event));
+}
+
+void EventLoop::dispatchCallback([[maybe_unused]] evutil_socket_t fd,
+				 [[maybe_unused]] short flags, void *param)
+{
+	EventLoop *loop = static_cast<EventLoop *>(param);
+	loop->dispatchCall();
+}
+
+void EventLoop::dispatchCall()
+{
+	std::function<void()> call;
+
+	{
+		std::unique_lock<std::mutex> locker(lock_);
+		if (calls_.empty())
+			return;
+
+		call = calls_.front();
+		calls_.pop_front();
+	}
+
+	call();
+}
+
+EventLoop::Event::Event(const std::function<void()> &callback)
+	: callback_(callback), event_(nullptr)
+{
+}
+
+EventLoop::Event::~Event()
+{
+	event_del(event_);
+	event_free(event_);
+}
+
+void EventLoop::Event::dispatch([[maybe_unused]] int fd,
+				[[maybe_unused]] short events, void *arg)
+{
+	Event *event = static_cast<Event *>(arg);
+	event->callback_();
+}
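+/*
+ * Illustrative use (a hypothetical caller, not part of this file): run a
+ * periodic one-second timer until something calls exit() on the loop.
+ *
+ *   EventLoop loop;
+ *   loop.addTimerEvent(std::chrono::seconds(1),
+ *                      []() { std::cout << "tick" << std::endl; });
+ *   loop.exec();
+ */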
diff --git a/spider-cam/libcamera/src/apps/common/event_loop.h b/spider-cam/libcamera/src/apps/common/event_loop.h
new file mode 100644
index 0000000..ef129b9
--- /dev/null
+++ b/spider-cam/libcamera/src/apps/common/event_loop.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * cam - Event loop
+ */
+
+#pragma once
+
+#include <chrono>
+#include <functional>
+#include <list>
+#include <memory>
+#include <mutex>
+
+#include <event2/util.h>
+
+struct event_base;
+
+class EventLoop
+{
+public:
+	enum EventType {
+		Read = 1,
+		Write = 2,
+	};
+
+	EventLoop();
+	~EventLoop();
+
+	static EventLoop *instance();
+
+	int exec();
+	void exit(int code = 0);
+
+	void callLater(const std::function<void()> &func);
+
+	void addFdEvent(int fd, EventType type,
+			const std::function<void()> &handler);
+
+	using duration = std::chrono::steady_clock::duration;
+	void addTimerEvent(const std::chrono::microseconds period,
+			   const std::function<void()> &handler);
+
+private:
+	struct Event {
+		Event(const std::function<void()> &callback);
+		~Event();
+
+		static void dispatch(int fd, short events, void *arg);
+
+		std::function<void()> callback_;
+		struct event *event_;
+	};
+
+	static EventLoop *instance_;
+
+	struct event_base *base_;
+	int exitCode_;
+
+	std::list<std::function<void()>> calls_;
+	std::list<std::unique_ptr<Event>> events_;
+	std::mutex lock_;
+
+	static void dispatchCallback(evutil_socket_t fd, short flags,
+				     void *param);
+	void dispatchCall();
+};
diff --git a/spider-cam/libcamera/src/apps/common/image.cpp b/spider-cam/libcamera/src/apps/common/image.cpp
new file mode 100644
index 0000000..a2a0f58
--- /dev/null
+++ b/spider-cam/libcamera/src/apps/common/image.cpp
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Ideas on Board Oy
+ *
+ * Multi-planar image with access to pixel data
+ */
+
+#include "image.h"
+
+#include <assert.h>
+#include <errno.h>
+#include <iostream>
+#include <map>
+#include <string.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+using namespace libcamera;
+
+std::unique_ptr<Image> Image::fromFrameBuffer(const FrameBuffer *buffer, MapMode mode)
+{
+	std::unique_ptr<Image> image{ new Image() };
+
+	assert(!buffer->planes().empty());
+
+	int mmapFlags = 0;
+
+	if (mode & MapMode::ReadOnly)
+		mmapFlags |= PROT_READ;
+
+	if (mode & MapMode::WriteOnly)
+		mmapFlags |= PROT_WRITE;
+
+	struct MappedBufferInfo {
+		uint8_t *address = nullptr;
+		size_t mapLength = 0;
+		size_t dmabufLength = 0;
+	};
+	std::map<int, MappedBufferInfo> mappedBuffers;
+
+	for (const FrameBuffer::Plane &plane : buffer->planes()) {
+		const int fd = plane.fd.get();
+		if (mappedBuffers.find(fd) == mappedBuffers.end()) {
+			const size_t length = lseek(fd, 0, SEEK_END);
+			mappedBuffers[fd] = MappedBufferInfo{ nullptr, 0, length };
+		}
+
+		const size_t length = mappedBuffers[fd].dmabufLength;
+
+		if (plane.offset > length ||
+		    plane.offset + plane.length > length) {
+			std::cerr << "plane is out of buffer: buffer length="
+				  << length << ", plane offset=" << plane.offset
+				  << ", plane length=" << plane.length
+				  << std::endl;
+			return nullptr;
+		}
+		size_t &mapLength = mappedBuffers[fd].mapLength;
+		mapLength = std::max(mapLength,
+				     static_cast<size_t>(plane.offset + plane.length));
+	}
+
+	for (const FrameBuffer::Plane &plane : buffer->planes()) {
+		const int fd = plane.fd.get();
+		auto &info = mappedBuffers[fd];
+		if (!info.address) {
+			void *address = mmap(nullptr, info.mapLength, mmapFlags,
+					     MAP_SHARED, fd, 0);
+			if (address == MAP_FAILED) {
+				int error = -errno;
+				std::cerr << "Failed to mmap plane: "
+					  << strerror(-error) << std::endl;
+				return nullptr;
+			}
+
+			info.address = static_cast<uint8_t *>(address);
+			image->maps_.emplace_back(info.address, info.mapLength);
+		}
+
+		image->planes_.emplace_back(info.address + plane.offset, plane.length);
+	}
+
+	return image;
+}
+
+Image::Image() = default;
+
+Image::~Image()
+{
+	for (Span<uint8_t> &map : maps_)
+		munmap(map.data(), map.size());
+}
+
+unsigned int Image::numPlanes() const
+{
+	return planes_.size();
+}
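+/*
+ * Typical use (illustrative sketch; the buffer would come from a completed
+ * libcamera Request in the caller):
+ *
+ *   std::unique_ptr<Image> image =
+ *           Image::fromFrameBuffer(buffer, Image::MapMode::ReadOnly);
+ *   if (image)
+ *           libcamera::Span<uint8_t> pixels = image->data(0);
+ */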
+Span<uint8_t> Image::data(unsigned int plane)
+{
+	assert(plane < planes_.size());
+	return planes_[plane];
+}
+
+Span<const uint8_t> Image::data(unsigned int plane) const
+{
+	assert(plane < planes_.size());
+	return planes_[plane];
+}
diff --git a/spider-cam/libcamera/src/apps/common/image.h b/spider-cam/libcamera/src/apps/common/image.h
new file mode 100644
index 0000000..e47e446
--- /dev/null
+++ b/spider-cam/libcamera/src/apps/common/image.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Ideas on Board Oy
+ *
+ * Multi-planar image with access to pixel data
+ */
+
+#pragma once
+
+#include <memory>
+#include <stdint.h>
+#include <vector>
+
+#include <libcamera/base/class.h>
+#include <libcamera/base/flags.h>
+#include <libcamera/base/span.h>
+
+#include <libcamera/framebuffer.h>
+
+class Image
+{
+public:
+	enum class MapMode {
+		ReadOnly = 1 << 0,
+		WriteOnly = 1 << 1,
+		ReadWrite = ReadOnly | WriteOnly,
+	};
+
+	static std::unique_ptr<Image> fromFrameBuffer(const libcamera::FrameBuffer *buffer,
+						      MapMode mode);
+
+	~Image();
+
+	unsigned int numPlanes() const;
+
+	libcamera::Span<uint8_t> data(unsigned int plane);
+	libcamera::Span<const uint8_t> data(unsigned int plane) const;
+
+private:
+	LIBCAMERA_DISABLE_COPY(Image)
+
+	Image();
+
+	std::vector<libcamera::Span<uint8_t>> maps_;
+	std::vector<libcamera::Span<uint8_t>> planes_;
+};
+
+namespace libcamera {
+LIBCAMERA_FLAGS_ENABLE_OPERATORS(Image::MapMode)
+}
diff --git a/spider-cam/libcamera/src/apps/common/meson.build b/spider-cam/libcamera/src/apps/common/meson.build
new file mode 100644
index 0000000..5b68339
--- /dev/null
+++ b/spider-cam/libcamera/src/apps/common/meson.build
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: CC0-1.0
+
+apps_sources = files([
+    'image.cpp',
+    'options.cpp',
+    'ppm_writer.cpp',
+    'stream_options.cpp',
+])
+
+apps_cpp_args = []
+
+if libevent.found()
+    apps_sources += files([
+        'event_loop.cpp',
+    ])
+endif
+
+if libtiff.found()
+    apps_cpp_args += ['-DHAVE_TIFF']
+    apps_sources += files([
+        'dng_writer.cpp',
+    ])
+endif
+
+apps_lib = static_library('apps', apps_sources,
+                          cpp_args : apps_cpp_args,
+                          dependencies : [libcamera_public])
diff --git a/spider-cam/libcamera/src/apps/common/options.cpp b/spider-cam/libcamera/src/apps/common/options.cpp
new file mode 100644
index 0000000..ab19aa3
--- /dev/null
+++ b/spider-cam/libcamera/src/apps/common/options.cpp
@@ -0,0 +1,1141 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ * + * cam - Options parsing + */ + +#include +#include +#include +#include +#include + +#include "options.h" + +/** + * \enum OptionArgument + * \brief Indicate if an option takes an argument + * + * \var OptionArgument::ArgumentNone + * \brief The option doesn't accept any argument + * + * \var OptionArgument::ArgumentRequired + * \brief The option requires an argument + * + * \var OptionArgument::ArgumentOptional + * \brief The option accepts an optional argument + */ + +/** + * \enum OptionType + * \brief The type of argument for an option + * + * \var OptionType::OptionNone + * \brief No argument type, used for options that take no argument + * + * \var OptionType::OptionInteger + * \brief Integer argument type, with an optional base prefix (`0` for base 8, + * `0x` for base 16, none for base 10) + * + * \var OptionType::OptionString + * \brief String argument + * + * \var OptionType::OptionKeyValue + * \brief key=value list argument + */ + +/* ----------------------------------------------------------------------------- + * Option + */ + +/** + * \struct Option + * \brief Store metadata about an option + * + * \var Option::opt + * \brief The option identifier + * + * \var Option::type + * \brief The type of the option argument + * + * \var Option::name + * \brief The option name + * + * \var Option::argument + * \brief Whether the option accepts an optional argument, a mandatory + * argument, or no argument at all + * + * \var Option::argumentName + * \brief The argument name used in the help text + * + * \var Option::help + * \brief The help text (may be a multi-line string) + * + * \var Option::keyValueParser + * \brief For options of type OptionType::OptionKeyValue, the key-value parser + * to parse the argument + * + * \var Option::isArray + * \brief Whether the option can appear once or multiple times + * + * \var Option::parent + * \brief The parent option + * + * \var Option::children + * \brief List of child options, storing all options whose parent is this option + * + * \fn Option::hasShortOption() + * \brief Tell if the option has a short option specifier (e.g. `-f`) + * \return True if the option has a short option specifier, false otherwise + * + * \fn Option::hasLongOption() + * \brief Tell if the option has a long option specifier (e.g. `--foo`) + * \return True if the option has a long option specifier, false otherwise + */ +struct Option { + int opt; + OptionType type; + const char *name; + OptionArgument argument; + const char *argumentName; + const char *help; + KeyValueParser *keyValueParser; + bool isArray; + Option *parent; + std::list