An attempt at getting image data back

2024-07-14 00:27:33 +02:00
parent e026bc93f7
commit 6452d2e774
1314 changed files with 218350 additions and 38 deletions

View File

@@ -0,0 +1,13 @@
# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
from libtuning.utils import *
from libtuning.libtuning import *
from libtuning.image import *
from libtuning.macbeth import *
from libtuning.average import *
from libtuning.gradient import *
from libtuning.smoothing import *

View File

@@ -0,0 +1,21 @@
# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
# Wrapper for numpy averaging functions to enable duck-typing
import numpy as np
# @brief Wrapper for np averaging functions so that they can be duck-typed
class Average(object):
def __init__(self):
pass
def average(self, np_array):
raise NotImplementedError
class Mean(Average):
def average(self, np_array):
return np.mean(np_array)
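# As an illustration of the duck-typing above, a hypothetical variant (not
# part of this commit) would only need to override average(), e.g. a
# percentile-trimmed mean:
#
# class TrimmedMean(Average):
#     def average(self, np_array):
#         low, high = np.percentile(np_array, (5, 95))
#         return np.mean(np_array[(np_array >= low) & (np_array <= high)])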

View File

@@ -0,0 +1,378 @@
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# camera tuning tool for AWB
import matplotlib.pyplot as plt
from bisect import bisect_left
from scipy.optimize import fmin
import numpy as np
from .image import Image
"""
obtain piecewise linear approximation for colour curve
"""
def awb(Cam, cal_cr_list, cal_cb_list, plot):
imgs = Cam.imgs
"""
condense alsc calibration tables into one dictionary
"""
if cal_cr_list is None:
colour_cals = None
else:
colour_cals = {}
for cr, cb in zip(cal_cr_list, cal_cb_list):
cr_tab = cr['table']
cb_tab = cb['table']
"""
normalise tables so min value is 1
"""
cr_tab = cr_tab/np.min(cr_tab)
cb_tab = cb_tab/np.min(cb_tab)
colour_cals[cr['ct']] = [cr_tab, cb_tab]
"""
obtain data from greyscale macbeth patches
"""
rb_raw = []
rbs_hat = []
for Img in imgs:
Cam.log += '\nProcessing '+Img.name
"""
get greyscale patches with alsc applied if alsc enabled.
Note: if alsc is disabled then colour_cals will be set to None and the
function will just return the greyscale patches
"""
r_patchs, b_patchs, g_patchs = get_alsc_patches(Img, colour_cals)
"""
calculate ratio of r, b to g
"""
r_g = np.mean(r_patchs/g_patchs)
b_g = np.mean(b_patchs/g_patchs)
Cam.log += '\n r : {:.4f} b : {:.4f}'.format(r_g, b_g)
"""
The curve tends to be better behaved in so-called hatspace.
R, B, G represent the individual channels. The colour curve is plotted in
r, b space, where:
r = R/G
b = B/G
This will be referred to as dehatspace... (sorry)
Hatspace is defined as:
r_hat = R/(R+B+G)
b_hat = B/(R+B+G)
To convert from dehatspace to hatspace (hat operation):
r_hat = r/(1+r+b)
b_hat = b/(1+r+b)
To convert from hatspace to dehatspace (dehat operation):
r = r_hat/(1-r_hat-b_hat)
b = b_hat/(1-r_hat-b_hat)
Proof is left as an exercise to the reader...
Throughout the code, r and b are sometimes referred to as r_g and b_g
as a reminder that they are ratios
"""
r_g_hat = r_g/(1+r_g+b_g)
b_g_hat = b_g/(1+r_g+b_g)
Cam.log += '\n r_hat : {:.4f} b_hat : {:.4f}'.format(r_g_hat, b_g_hat)
rbs_hat.append((r_g_hat, b_g_hat, Img.col))
rb_raw.append((r_g, b_g))
Cam.log += '\n'
Cam.log += '\nFinished processing images'
"""
sort all lists simultaneously by r_hat
"""
rbs_zip = list(zip(rbs_hat, rb_raw))
rbs_zip.sort(key=lambda x: x[0][0])
rbs_hat, rb_raw = list(zip(*rbs_zip))
"""
unzip tuples ready for processing
"""
rbs_hat = list(zip(*rbs_hat))
rb_raw = list(zip(*rb_raw))
"""
fit a quadratic curve to r_g_hat and b_g_hat
"""
a, b, c = np.polyfit(rbs_hat[0], rbs_hat[1], 2)
Cam.log += '\nFit quadratic curve in hatspace'
"""
the algorithm now approximates the shortest distance from each point to the
curve in dehatspace. Since the fit is done in hatspace, it is easier to
find the actual shortest distance in hatspace and use the projection back
into dehatspace as an overestimate.
The distance will be used for two things:
1) In the case that colour temperature does not strictly decrease with
increasing r/g, the closest point to the line will be chosen out of an
increasing pair of colours.
2) To calculate transverse negative and positive, the maximum positive
and negative distance from the line are chosen. This benefits from the
overestimate as the transverse pos/neg are upper bound values.
"""
"""
define fit function
"""
def f(x):
return a*x**2 + b*x + c
"""
iterate over points (R, B are x and y coordinates of points) and calculate
distance to line in dehatspace
"""
dists = []
for i, (R, B) in enumerate(zip(rbs_hat[0], rbs_hat[1])):
"""
define function to minimise as square distance between datapoint and
point on curve. Squaring is monotonic so minimising radius squared is
equivalent to minimising radius
"""
def f_min(x):
y = f(x)
return((x-R)**2+(y-B)**2)
"""
perform optimisation with scipy.optimize.fmin
"""
x_hat = fmin(f_min, R, disp=0)[0]
y_hat = f(x_hat)
"""
dehat
"""
x = x_hat/(1-x_hat-y_hat)
y = y_hat/(1-x_hat-y_hat)
rr = R/(1-R-B)
bb = B/(1-R-B)
"""
calculate euclidean distance in dehatspace
"""
dist = ((x-rr)**2+(y-bb)**2)**0.5
"""
return negative if point is below the fit curve
"""
if (x+y) > (rr+bb):
dist *= -1
dists.append(dist)
Cam.log += '\nFound closest point on fit line to each point in dehatspace'
"""
calculate wiggle factors in awb. 10% added since this is an upper bound
"""
transverse_neg = - np.min(dists) * 1.1
transverse_pos = np.max(dists) * 1.1
Cam.log += '\nTransverse pos : {:.5f}'.format(transverse_pos)
Cam.log += '\nTransverse neg : {:.5f}'.format(transverse_neg)
"""
set minimum transverse wiggles to 0.01.
Wiggle factors dictate how far off of the curve the algorithm searches. 0.01
is a suitable minimum that gives better results for lighting conditions not
within the calibration dataset. Anything less will generalise poorly.
"""
if transverse_pos < 0.01:
transverse_pos = 0.01
Cam.log += '\nForced transverse pos to 0.01'
if transverse_neg < 0.01:
transverse_neg = 0.01
Cam.log += '\nForced transverse neg to 0.01'
"""
generate new b_hat values at each r_hat according to fit
"""
r_hat_fit = np.array(rbs_hat[0])
b_hat_fit = a*r_hat_fit**2 + b*r_hat_fit + c
"""
transform from hatspace to dehatspace
"""
r_fit = r_hat_fit/(1-r_hat_fit-b_hat_fit)
b_fit = b_hat_fit/(1-r_hat_fit-b_hat_fit)
c_fit = np.round(rbs_hat[2], 0)
"""
round to 4dp
"""
r_fit = np.where((1000*r_fit) % 1 <= 0.05, r_fit+0.0001, r_fit)
r_fit = np.where((1000*r_fit) % 1 >= 0.95, r_fit-0.0001, r_fit)
b_fit = np.where((1000*b_fit) % 1 <= 0.05, b_fit+0.0001, b_fit)
b_fit = np.where((1000*b_fit) % 1 >= 0.95, b_fit-0.0001, b_fit)
r_fit = np.round(r_fit, 4)
b_fit = np.round(b_fit, 4)
"""
The following code ensures that colour temperature decreases with
increasing r/g
"""
"""
iterate backwards over list for easier indexing
"""
i = len(c_fit) - 1
while i > 0:
if c_fit[i] > c_fit[i-1]:
Cam.log += '\nColour temperature increase found\n'
Cam.log += '{} K at r = {} to '.format(c_fit[i-1], r_fit[i-1])
Cam.log += '{} K at r = {}'.format(c_fit[i], r_fit[i])
"""
if colour temperature increases then discard point furthest from
the transformed fit (dehatspace)
"""
error_1 = abs(dists[i])
error_2 = abs(dists[i-1])
Cam.log += '\nDistances from fit:\n'
Cam.log += '{} K : {:.5f} , '.format(c_fit[i], error_1)
Cam.log += '{} K : {:.5f}'.format(c_fit[i-1], error_2)
"""
find bad index
note that in python false = 0 and true = 1
"""
bad = i - (error_1 < error_2)
Cam.log += '\nPoint at {} K deleted as '.format(c_fit[bad])
Cam.log += 'it is furthest from fit'
"""
delete bad point
"""
r_fit = np.delete(r_fit, bad)
b_fit = np.delete(b_fit, bad)
c_fit = np.delete(c_fit, bad).astype(np.uint16)
"""
note that if a point has been discarded then the length has decreased
by one, meaning that decreasing the index by one will reassess the kept
point against the next point. It is therefore possible, in theory, for
two adjacent points to be discarded, although probably rare
"""
i -= 1
"""
return formatted ct curve, ordered by increasing colour temperature
"""
ct_curve = list(np.array(list(zip(b_fit, r_fit, c_fit))).flatten())[::-1]
Cam.log += '\nFinal CT curve:'
for i in range(len(ct_curve)//3):
j = 3*i
Cam.log += '\n ct: {} '.format(ct_curve[j])
Cam.log += ' r: {} '.format(ct_curve[j+1])
Cam.log += ' b: {} '.format(ct_curve[j+2])
"""
plotting code for debug
"""
if plot:
x = np.linspace(np.min(rbs_hat[0]), np.max(rbs_hat[0]), 100)
y = a*x**2 + b*x + c
plt.subplot(2, 1, 1)
plt.title('hatspace')
plt.plot(rbs_hat[0], rbs_hat[1], ls='--', color='blue')
plt.plot(x, y, color='green', ls='-')
plt.scatter(rbs_hat[0], rbs_hat[1], color='red')
for i, ct in enumerate(rbs_hat[2]):
plt.annotate(str(ct), (rbs_hat[0][i], rbs_hat[1][i]))
plt.xlabel('$\\hat{r}$')
plt.ylabel('$\\hat{b}$')
"""
optionally set axes equal to the shortest distance so the line really does
look perpendicular and everybody is happy
"""
# ax = plt.gca()
# ax.set_aspect('equal')
plt.grid()
plt.subplot(2, 1, 2)
plt.title('dehatspace - indoors?')
plt.plot(r_fit, b_fit, color='blue')
plt.scatter(rb_raw[0], rb_raw[1], color='green')
plt.scatter(r_fit, b_fit, color='red')
for i, ct in enumerate(c_fit):
plt.annotate(str(ct), (r_fit[i], b_fit[i]))
plt.xlabel('$r$')
plt.ylabel('$b$')
"""
optionally set axes equal to the shortest distance so the line really does
look perpendicular and everybody is happy
"""
# ax = plt.gca()
# ax.set_aspect('equal')
plt.subplots_adjust(hspace=0.5)
plt.grid()
plt.show()
"""
end of plotting code
"""
return(ct_curve, np.round(transverse_pos, 5), np.round(transverse_neg, 5))
"""
obtain greyscale patches and perform alsc colour correction
"""
def get_alsc_patches(Img, colour_cals, grey=True):
"""
get patch centre coordinates, image colour and the actual
patches for each channel, remembering to subtract blacklevel
If grey then only greyscale patches considered
"""
if grey:
cen_coords = Img.cen_coords[3::4]
col = Img.col
patches = [np.array(Img.patches[i]) for i in Img.order]
r_patchs = patches[0][3::4] - Img.blacklevel_16
b_patchs = patches[3][3::4] - Img.blacklevel_16
"""
note two green channels are averages
"""
g_patchs = (patches[1][3::4]+patches[2][3::4])/2 - Img.blacklevel_16
else:
cen_coords = Img.cen_coords
col = Img.color
patches = [np.array(Img.patches[i]) for i in Img.order]
r_patchs = patches[0] - Img.blacklevel_16
b_patchs = patches[3] - Img.blacklevel_16
g_patchs = (patches[1]+patches[2])/2 - Img.blacklevel_16
if colour_cals is None:
return r_patchs, b_patchs, g_patchs
"""
find where image colour fits in alsc colour calibration tables
"""
cts = list(colour_cals.keys())
pos = bisect_left(cts, col)
"""
if the image colour is below the minimum or above the maximum alsc
calibration colour, simply pick the extreme closest to the image colour
"""
if pos % len(cts) == 0:
"""
this works because -0 = 0 = first and -1 = last index
"""
col_tabs = np.array(colour_cals[cts[-pos//len(cts)]])
"""
else, perform linear interpolation between existing alsc colour
calibration tables
"""
else:
bef = cts[pos-1]
aft = cts[pos]
da = col-bef
db = aft-col
bef_tabs = np.array(colour_cals[bef])
aft_tabs = np.array(colour_cals[aft])
col_tabs = (bef_tabs*db + aft_tabs*da)/(da+db)
col_tabs = np.reshape(col_tabs, (2, 12, 16))
"""
calculate dx, dy used to calculate alsc table
"""
w, h = Img.w/2, Img.h/2
dx, dy = int(-(-(w-1)//16)), int(-(-(h-1)//12))
"""
make list of pairs of gains for each patch by selecting the correct value
in alsc colour calibration table
"""
patch_gains = []
for cen in cen_coords:
x, y = cen[0]//dx, cen[1]//dy
# We could probably do with some better spatial interpolation here?
col_gains = (col_tabs[0][y][x], col_tabs[1][y][x])
patch_gains.append(col_gains)
"""
multiply the r and b channels in each patch by the respective gain, finally
performing the alsc colour correction
"""
for i, gains in enumerate(patch_gains):
r_patchs[i] = r_patchs[i] * gains[0]
b_patchs[i] = b_patchs[i] * gains[1]
"""
return greyscale patches, g channel and correct r, b channels
"""
return r_patchs, b_patchs, g_patchs

View File

@@ -0,0 +1,408 @@
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# camera tuning tool for CCM (colour correction matrix)
import logging
import numpy as np
from scipy.optimize import minimize
from . import ctt_colors as colors
from .image import Image
from .ctt_awb import get_alsc_patches
from .utils import visualise_macbeth_chart
logger = logging.getLogger(__name__)
"""
takes 8-bit macbeth chart values, degammas and returns 16 bit
"""
'''
This program has many options from which to derive the color matrix.
The first is average. This minimises the average delta E across all patches
of the macbeth chart. Testing across all cameras yielded this as the most
color accurate and vivid. Other options are available, however.
Maximum minimises the maximum delta E of the patches. It iterates until a
minimum maximum is found, so that there is not one patch that deviates
wildly. This yields generally good results, but overall the colors are less
accurate. Have a fiddle with maximum and see what you think.
The final option allows you to select the patches to average across. This
means that you can bias certain patches, for instance if you want the reds
to be more accurate.
'''
matrix_selection_types = ["average", "maximum", "patches"]
typenum = 0 # select from array above, 0 = average, 1 = maximum, 2 = patches
test_patches = [1, 2, 5, 8, 9, 12, 14]
'''
Enter patches to test for. Can also be entered twice if you
would like twice as much bias on one patch.
'''
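# For example (illustrative values), biasing the optimisation towards the
# reds could look like:
#
# typenum = 2
# test_patches = [9, 10, 9, 10]  # the 'red' patches, each counted twice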
"""
takes 8-bit macbeth chart values, degammas and returns 16 bit
"""
def degamma(x):
x = x / ((2 ** 8) - 1) # takes 255 and scales it down to one
x = np.where(x < 0.04045, x / 12.92, ((x + 0.055) / 1.055) ** 2.4)
x = x * ((2 ** 16) - 1) # takes one and scales up to 65535, 16 bit color
return x
def gamma(x):
# Take 3 long array of color values and gamma them
return [((colour / 255) ** (1 / 2.4) * 1.055 - 0.055) * 255 for colour in x]
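# Sanity check (illustrative): degamma(np.array([0, 255])) returns
# [0, 65535], since the sRGB transfer function fixes the black and white
# points; a mid-grey of 128 maps to roughly 14000 rather than 32768 because
# the curve is non-linear.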
"""
Finds colour correction matrices for a list of images
"""
def ccm(imgs, cal_cr_list, cal_cb_list):
global matrix_selection_types, typenum
"""
standard macbeth chart colour values
"""
m_rgb = np.array([ # these are in RGB
[116, 81, 67], # dark skin
[199, 147, 129], # light skin
[91, 122, 156], # blue sky
[90, 108, 64], # foliage
[130, 128, 176], # blue flower
[92, 190, 172], # bluish green
[224, 124, 47], # orange
[68, 91, 170], # purplish blue
[198, 82, 97], # moderate red
[94, 58, 106], # purple
[159, 189, 63], # yellow green
[230, 162, 39], # orange yellow
[35, 63, 147], # blue
[67, 149, 74], # green
[180, 49, 57], # red
[238, 198, 20], # yellow
[193, 84, 151], # magenta
[0, 136, 170], # cyan (goes out of gamut)
[245, 245, 243], # white 9.5
[200, 202, 202], # neutral 8
[161, 163, 163], # neutral 6.5
[121, 121, 122], # neutral 5
[82, 84, 86], # neutral 3.5
[49, 49, 51] # black 2
])
"""
convert reference colours from srgb to rgb
"""
m_srgb = degamma(m_rgb) # now in 16 bit color.
# Produce array of LAB values for ideal color chart
m_lab = [colors.RGB_to_LAB(color / 256) for color in m_srgb]
"""
reorder reference values to match how patches are ordered
"""
m_srgb = np.array([m_srgb[i::6] for i in range(6)]).reshape((24, 3))
m_lab = np.array([m_lab[i::6] for i in range(6)]).reshape((24, 3))
m_rgb = np.array([m_rgb[i::6] for i in range(6)]).reshape((24, 3))
"""
reformat alsc correction tables or set colour_cals to None if alsc is
deactivated
"""
if cal_cr_list is None:
colour_cals = None
else:
colour_cals = {}
for cr, cb in zip(cal_cr_list, cal_cb_list):
cr_tab = cr['table']
cb_tab = cb['table']
"""
normalise tables so min value is 1
"""
cr_tab = cr_tab / np.min(cr_tab)
cb_tab = cb_tab / np.min(cb_tab)
colour_cals[cr['ct']] = [cr_tab, cb_tab]
"""
for each image, perform awb and alsc corrections.
Then calculate the colour correction matrix for that image, recording the
ccm and the colour temperature.
"""
ccm_tab = {}
for Img in imgs:
logger.info('Processing image: ' + Img.name)
"""
get macbeth patches with alsc applied if alsc enabled.
Note: if alsc is disabled then colour_cals will be None and the function
will simply return the macbeth patches
"""
r, b, g = get_alsc_patches(Img, colour_cals, grey=False)
# 256 values for each patch of sRGB values
"""
do awb
Note: awb is done by measuring the macbeth chart in the image, rather
than from the awb calibration. This is done so the awb will be perfect
and the ccm matrices will be more accurate.
"""
r_greys, b_greys, g_greys = r[3::4], b[3::4], g[3::4]
r_g = np.mean(r_greys / g_greys)
b_g = np.mean(b_greys / g_greys)
r = r / r_g
b = b / b_g
"""
normalise brightness wrt reference macbeth colours and then average
each channel for each patch
"""
gain = np.mean(m_srgb) / np.mean((r, g, b))
logger.info(f'Gain with respect to standard colours: {gain:.3f}')
r = np.mean(gain * r, axis=1)
b = np.mean(gain * b, axis=1)
g = np.mean(gain * g, axis=1)
"""
calculate ccm matrix
"""
# ==== All of the below should be in sRGB ====
ccm = do_ccm(r, g, b, m_srgb)
# This is the initial guess that our optimisation code works with.
original_ccm = ccm
r1 = ccm[0]
r2 = ccm[1]
g1 = ccm[3]
g2 = ccm[4]
b1 = ccm[6]
b2 = ccm[7]
'''
COLOR MATRIX LOOKS AS BELOW
R1 R2 R3 Rval Outr
G1 G2 G3 * Gval = G
B1 B2 B3 Bval B
Will be optimising 6 elements and working out the third element using 1-r1-r2 = r3
'''
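# Why the rows must sum to 1 (a one-line check): for a grey input with
# R = G = B = v, each output channel is v*(x1 + x2 + x3), which equals v
# exactly when the row sums to 1, so greys pass through unchanged.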
x0 = [r1, r2, g1, g2, b1, b2]
'''
We use our old CCM as the initial guess for the program to find the
optimised matrix
'''
result = minimize(guess, x0, args=(r, g, b, m_lab), tol=0.01)
'''
This produces a color matrix which has the lowest delta E possible,
based on the input data. Note it is impossible for this to reach
zero since the input data is imperfect
'''
[r1, r2, g1, g2, b1, b2] = result.x
# The new, optimised color correction matrix values
# This is the optimised Color Matrix (preserving greys by summing rows up to 1)
optimised_ccm = [r1, r2, (1 - r1 - r2), g1, g2, (1 - g1 - g2), b1, b2, (1 - b1 - b2)]
logger.info(f'Optimized Matrix: {np.round(optimised_ccm, 4)}')
logger.info(f'Old Matrix: {np.round(ccm, 4)}')
formatted_ccm = np.array(original_ccm).reshape((3, 3))
'''
below is a whole load of code that then applies the latest color
matrix, and returns LAB values for color. This can then be used
to calculate the final delta E
'''
optimised_ccm_rgb = [] # Original Color Corrected Matrix RGB / LAB
optimised_ccm_lab = []
formatted_optimised_ccm = np.array(optimised_ccm).reshape((3, 3))
after_gamma_rgb = []
after_gamma_lab = []
for RGB in zip(r, g, b):
ccm_applied_rgb = np.dot(formatted_ccm, (np.array(RGB) / 256))
optimised_ccm_rgb.append(gamma(ccm_applied_rgb))
optimised_ccm_lab.append(colors.RGB_to_LAB(ccm_applied_rgb))
optimised_ccm_applied_rgb = np.dot(formatted_optimised_ccm, np.array(RGB) / 256)
after_gamma_rgb.append(gamma(optimised_ccm_applied_rgb))
after_gamma_lab.append(colors.RGB_to_LAB(optimised_ccm_applied_rgb))
'''
Gamma After RGB / LAB - not used in calculations, only used for visualisation
We now want to spit out some data that shows
how the optimisation has improved the color matrices
'''
logger.info("Here are the Improvements")
# CALCULATE WORST CASE delta e
old_worst_delta_e = 0
before_average = transform_and_evaluate(formatted_ccm, r, g, b, m_lab)
new_worst_delta_e = 0
after_average = transform_and_evaluate(formatted_optimised_ccm, r, g, b, m_lab)
for i in range(24):
old_delta_e = deltae(optimised_ccm_lab[i], m_lab[i]) # Current Old Delta E
new_delta_e = deltae(after_gamma_lab[i], m_lab[i]) # Current New Delta E
if old_delta_e > old_worst_delta_e:
old_worst_delta_e = old_delta_e
if new_delta_e > new_worst_delta_e:
new_worst_delta_e = new_delta_e
logger.info(f'delta E optimized: average: {after_average:.2f} max:{new_worst_delta_e:.2f}')
logger.info(f'delta E old: average: {before_average:.2f} max:{old_worst_delta_e:.2f}')
visualise_macbeth_chart(m_rgb, optimised_ccm_rgb, after_gamma_rgb, str(Img.color) + str(matrix_selection_types[typenum]))
'''
The program will also save some visualisations of improvements.
Very pretty to look at. Top rectangle is ideal, Left square is
before optimisation, right square is after.
'''
"""
if a ccm has already been calculated for that temperature then don't
overwrite but save both. They will then be averaged later on
""" # Now going to use optimised color matrix, optimised_ccm
if Img.color in ccm_tab.keys():
ccm_tab[Img.color].append(optimised_ccm)
else:
ccm_tab[Img.color] = [optimised_ccm]
logger.info('Finished processing images')
"""
average any ccms that share a colour temperature
"""
for k, v in ccm_tab.items():
tab = np.mean(v, axis=0)
tab = np.where((10000 * tab) % 1 <= 0.05, tab + 0.00001, tab)
tab = np.where((10000 * tab) % 1 >= 0.95, tab - 0.00001, tab)
ccm_tab[k] = list(np.round(tab, 5))
logger.info(f'Matrix calculated for colour temperature of {k} K')
"""
return all ccms with respective colour temperature in the correct format,
sorted by their colour temperature
"""
sorted_ccms = sorted(ccm_tab.items(), key=lambda kv: kv[0])
ccms = []
for i in sorted_ccms:
ccms.append({
'ct': i[0],
'ccm': i[1]
})
return ccms
def guess(x0, r, g, b, m_lab): # provides a method of numerical feedback for the optimisation code
[r1, r2, g1, g2, b1, b2] = x0
ccm = np.array([r1, r2, (1 - r1 - r2),
g1, g2, (1 - g1 - g2),
b1, b2, (1 - b1 - b2)]).reshape((3, 3)) # format the matrix correctly
return transform_and_evaluate(ccm, r, g, b, m_lab)
def transform_and_evaluate(ccm, r, g, b, m_lab): # Transforms colors to LAB and applies the correction matrix
# create list of matrix changed colors
realrgb = []
for RGB in zip(r, g, b):
rgb_post_ccm = np.dot(ccm, np.array(RGB) / 256) # This is RGB values after the color correction matrix has been applied
realrgb.append(colors.RGB_to_LAB(rgb_post_ccm))
# now compare that with m_lab and return numeric result, averaged for each patch
return (sumde(realrgb, m_lab) / 24) # returns an average result of delta E
def sumde(listA, listB):
global typenum, test_patches
sumde = 0
maxde = 0
patchde = [] # Create array of the delta E values for each patch. Useful for optimisation of certain patches
for listA_item, listB_item in zip(listA, listB):
if maxde < (deltae(listA_item, listB_item)):
maxde = deltae(listA_item, listB_item)
patchde.append(deltae(listA_item, listB_item))
sumde += deltae(listA_item, listB_item)
'''
The different options specified at the start allow for
the maximum to be returned, average or specific patches
'''
if typenum == 0:
return sumde
if typenum == 1:
return maxde
if typenum == 2:
output = sum([patchde[test_patch] for test_patch in test_patches])
# Selects only certain patches and returns the output for them
return output
"""
calculates the ccm for an individual image.
ccms are calculated in rgb space, and are fit by hand. Although it is a 3x3
matrix, each row must add up to 1 in order to conserve greyness, simplifying
calculation.
The initial CCM is calculated in RGB, and then optimised in LAB color space
This simplifies the initial calculation but then gets us the accuracy of
using LAB color space.
"""
def do_ccm(r, g, b, m_srgb):
rb = r-b
gb = g-b
rb_2s = (rb * rb)
rb_gbs = (rb * gb)
gb_2s = (gb * gb)
r_rbs = rb * (m_srgb[..., 0] - b)
r_gbs = gb * (m_srgb[..., 0] - b)
g_rbs = rb * (m_srgb[..., 1] - b)
g_gbs = gb * (m_srgb[..., 1] - b)
b_rbs = rb * (m_srgb[..., 2] - b)
b_gbs = gb * (m_srgb[..., 2] - b)
"""
Obtain least squares fit
"""
rb_2 = np.sum(rb_2s)
gb_2 = np.sum(gb_2s)
rb_gb = np.sum(rb_gbs)
r_rb = np.sum(r_rbs)
r_gb = np.sum(r_gbs)
g_rb = np.sum(g_rbs)
g_gb = np.sum(g_gbs)
b_rb = np.sum(b_rbs)
b_gb = np.sum(b_gbs)
det = rb_2 * gb_2 - rb_gb * rb_gb
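# The sums above form the 2x2 normal equations of the least squares problem,
# shown here for the red row (green and blue are analogous):
#
#   | rb_2   rb_gb | | r_a |   | r_rb |
#   | rb_gb  gb_2  | | r_b | = | r_gb |
#
# det is the determinant of that matrix, and the divisions below solve the
# system by Cramer's rule.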
"""
Raise error if matrix is singular...
This shouldn't really happen with real data but if it does just take new
pictures and try again, not much else to be done unfortunately...
"""
if det < 0.001:
raise ArithmeticError('Matrix is singular')
r_a = (gb_2 * r_rb - rb_gb * r_gb) / det
r_b = (rb_2 * r_gb - rb_gb * r_rb) / det
"""
Last row can be calculated by knowing the sum must be 1
"""
r_c = 1 - r_a - r_b
g_a = (gb_2 * g_rb - rb_gb * g_gb) / det
g_b = (rb_2 * g_gb - rb_gb * g_rb) / det
g_c = 1 - g_a - g_b
b_a = (gb_2 * b_rb - rb_gb * b_gb) / det
b_b = (rb_2 * b_gb - rb_gb * b_rb) / det
b_c = 1 - b_a - b_b
"""
format ccm
"""
ccm = [r_a, r_b, r_c, g_a, g_b, g_c, b_a, b_b, b_c]
return ccm
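# deltae() below is the CIE76 colour difference: the plain Euclidean
# distance between two colours in LAB space.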
def deltae(colorA, colorB):
return ((colorA[0] - colorB[0]) ** 2 + (colorA[1] - colorB[1]) ** 2 + (colorA[2] - colorB[2]) ** 2) ** 0.5
# return ((colorA[1] - colorB[1]) ** 2 + (colorA[2] - colorB[2]) ** 2) ** 0.5
# UNCOMMENT IF YOU WANT TO NEGLECT LUMINANCE FROM CALCULATION OF DELTA E

View File

@@ -0,0 +1,30 @@
# Program to convert from RGB to LAB color space
def RGB_to_LAB(RGB): # where RGB is a 1x3 array, e.g. RGB = [100, 255, 230]
XYZ = [0, 0, 0]
# converted all the three R, G, B to X, Y, Z
X = RGB[0] * 0.4124 + RGB[1] * 0.3576 + RGB[2] * 0.1805
Y = RGB[0] * 0.2126 + RGB[1] * 0.7152 + RGB[2] * 0.0722
Z = RGB[0] * 0.0193 + RGB[1] * 0.1192 + RGB[2] * 0.9505
XYZ[0] = X / 255 * 100
XYZ[1] = Y / 255 * 100 # XYZ Must be in range 0 -> 100, so scale down from 255
XYZ[2] = Z / 255 * 100
XYZ[0] = XYZ[0] / 95.047 # ref_X = 95.047 Observer= 2°, Illuminant= D65
XYZ[1] = XYZ[1] / 100.0 # ref_Y = 100.000
XYZ[2] = XYZ[2] / 108.883 # ref_Z = 108.883
num = 0
for value in XYZ:
if value > 0.008856:
value = value ** (1 / 3)
else:
value = (7.787 * value) + (16 / 116)
XYZ[num] = value
num = num + 1
# L, A, B, values calculated below
L = (116 * XYZ[1]) - 16
a = 500 * (XYZ[0] - XYZ[1])
b = 200 * (XYZ[1] - XYZ[2])
return [L, a, b]
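# Quick check (illustrative): RGB_to_LAB([255, 255, 255]) returns
# approximately [100, 0, 0], i.e. reference white, since the scaled XYZ
# values all land on the D65 white point.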

View File

@@ -0,0 +1,71 @@
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# camera tuning tool RANSAC selector for Macbeth chart locator
import numpy as np
scale = 2
"""
constructs normalised macbeth chart corners for ransac algorithm
"""
def get_square_verts(c_err=0.05, scale=scale):
"""
define macbeth chart corners
"""
b_bord_x, b_bord_y = scale*8.5, scale*13
s_bord = 6*scale
side = 41*scale
x_max = side*6 + 5*s_bord + 2*b_bord_x
y_max = side*4 + 3*s_bord + 2*b_bord_y
c1 = (0, 0)
c2 = (0, y_max)
c3 = (x_max, y_max)
c4 = (x_max, 0)
mac_norm = np.array((c1, c2, c3, c4), np.float32)
mac_norm = np.array([mac_norm])
square_verts = []
square_0 = np.array(((0, 0), (0, side),
(side, side), (side, 0)), np.float32)
offset_0 = np.array((b_bord_x, b_bord_y), np.float32)
c_off = side * c_err
offset_cont = np.array(((c_off, c_off), (c_off, -c_off),
(-c_off, -c_off), (-c_off, c_off)), np.float32)
square_0 += offset_0
square_0 += offset_cont
"""
define macbeth square corners
"""
for i in range(6):
shift_i = np.array(((i*side, 0), (i*side, 0),
(i*side, 0), (i*side, 0)), np.float32)
shift_bord = np.array(((i*s_bord, 0), (i*s_bord, 0),
(i*s_bord, 0), (i*s_bord, 0)), np.float32)
square_i = square_0 + shift_i + shift_bord
for j in range(4):
shift_j = np.array(((0, j*side), (0, j*side),
(0, j*side), (0, j*side)), np.float32)
shift_bord = np.array(((0, j*s_bord),
(0, j*s_bord), (0, j*s_bord),
(0, j*s_bord)), np.float32)
square_j = square_i + shift_j + shift_bord
square_verts.append(square_j)
# print('square_verts')
# print(square_verts)
return np.array(square_verts, np.float32), mac_norm
def get_square_centres(c_err=0.05, scale=scale):
"""
define macbeth square centres
"""
verts, mac_norm = get_square_verts(c_err, scale=scale)
centres = np.mean(verts, axis=1)
# print('centres')
# print(centres)
return np.array(centres, np.float32)
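# Shape notes (illustrative): get_square_verts() returns a (24, 4, 2) array,
# one quadrilateral of (x, y) vertices per patch of the 6x4 chart, and
# get_square_centres() averages each quadrilateral down to a (24, 2) array
# of centres.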

View File

@@ -0,0 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
from libtuning.generators.raspberrypi_output import RaspberryPiOutput
from libtuning.generators.yaml_output import YamlOutput

View File

@@ -0,0 +1,15 @@
# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
# Base class for a generator to convert dict to tuning file
from pathlib import Path
class Generator(object):
def __init__(self):
pass
def write(self, output_path: Path, output_dict: dict, output_order: list):
raise NotImplementedError

View File

@@ -0,0 +1,114 @@
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright 2022 Raspberry Pi Ltd
#
# Generate tuning file in Raspberry Pi's json format
#
# (Copied from ctt_pretty_print_json.py)
from .generator import Generator
import json
from pathlib import Path
import textwrap
class Encoder(json.JSONEncoder):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.indentation_level = 0
self.hard_break = 120
self.custom_elems = {
'table': 16,
'luminance_lut': 16,
'ct_curve': 3,
'ccm': 3,
'gamma_curve': 2,
'y_target': 2,
'prior': 2
}
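# For instance, 'ccm': 3 means that a 9-element colour correction matrix is
# emitted as three rows of three values, so the JSON reads like a 3x3 matrix.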
def encode(self, o, node_key=None):
if isinstance(o, (list, tuple)):
# Check if we are a flat list of numbers.
if not any(isinstance(el, (list, tuple, dict)) for el in o):
s = ', '.join(json.dumps(el) for el in o)
if node_key in self.custom_elems.keys():
# Special case handling to specify number of elements in a row for tables, ccm, etc.
self.indentation_level += 1
sl = s.split(', ')
num = self.custom_elems[node_key]
chunk = [self.indent_str + ', '.join(sl[x:x + num]) for x in range(0, len(sl), num)]
t = ',\n'.join(chunk)
self.indentation_level -= 1
output = f'\n{self.indent_str}[\n{t}\n{self.indent_str}]'
elif len(s) > self.hard_break - len(self.indent_str):
# Break a long list with wraps.
self.indentation_level += 1
t = textwrap.fill(s, self.hard_break, break_long_words=False,
initial_indent=self.indent_str, subsequent_indent=self.indent_str)
self.indentation_level -= 1
output = f'\n{self.indent_str}[\n{t}\n{self.indent_str}]'
else:
# Smaller lists can remain on a single line.
output = f' [ {s} ]'
return output
else:
# Sub-structures in the list case.
self.indentation_level += 1
output = [self.indent_str + self.encode(el) for el in o]
self.indentation_level -= 1
output = ',\n'.join(output)
return f' [\n{output}\n{self.indent_str}]'
elif isinstance(o, dict):
self.indentation_level += 1
output = []
for k, v in o.items():
if isinstance(v, dict) and len(v) == 0:
# Empty config block special case.
output.append(self.indent_str + f'{json.dumps(k)}: {{ }}')
else:
# Only linebreak if the next node is a config block.
sep = f'\n{self.indent_str}' if isinstance(v, dict) else ''
output.append(self.indent_str + f'{json.dumps(k)}:{sep}{self.encode(v, k)}')
output = ',\n'.join(output)
self.indentation_level -= 1
return f'{{\n{output}\n{self.indent_str}}}'
else:
return ' ' + json.dumps(o)
@property
def indent_str(self) -> str:
return ' ' * self.indentation_level * self.indent
def iterencode(self, o, **kwargs):
return self.encode(o)
class RaspberryPiOutput(Generator):
def __init__(self):
super().__init__()
def _pretty_print(self, in_json: dict) -> str:
if 'version' not in in_json or \
'target' not in in_json or \
'algorithms' not in in_json or \
in_json['version'] < 2.0:
raise RuntimeError('Incompatible JSON dictionary has been provided')
return json.dumps(in_json, cls=Encoder, indent=4, sort_keys=False)
def write(self, output_file: Path, output_dict: dict, output_order: list):
# Write json dictionary to file using ctt's version 2 format
out_json = {
"version": 2.0,
'target': 'bcm2835',
"algorithms": [{f'{module.out_name}': output_dict[module]} for module in output_order]
}
with open(output_file, 'w') as f:
f.write(self._pretty_print(out_json))
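# Example usage (a minimal sketch; `module` stands for any libtuning module
# instance with an out_name attribute, as consumed by the Tuner):
#
# output = RaspberryPiOutput()
# output.write(Path('tuning.json'), {module: {'table': [...]}}, [module])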

View File

@@ -0,0 +1,127 @@
# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright 2022 Paul Elder <paul.elder@ideasonboard.com>
#
# Generate tuning file in YAML format
from .generator import Generator
from numbers import Number
from pathlib import Path
import logging
logger = logging.getLogger(__name__)
class YamlOutput(Generator):
def __init__(self):
super().__init__()
def _stringify_number_list(self, listt: list):
line_wrap = 80
line = '[ ' + ', '.join([str(x) for x in listt]) + ' ]'
if len(line) <= line_wrap:
return [line]
out_lines = ['[']
line = ' '
for x in listt:
x_str = str(x)
# If the first number is longer than line_wrap, it'll add an extra line
if len(line) + len(x_str) > line_wrap:
out_lines.append(line)
line = f' {x_str},'
continue
line += f' {x_str},'
out_lines.append(line)
out_lines.append(']')
return out_lines
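# For example (illustrative): a list that fits within line_wrap characters
# comes back as a single '[ 1, 2, 3 ]' line, while a longer list is returned
# as '[', several indented lines of comma-separated values, and a closing ']'.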
# @return Array of lines, and boolean of if all elements were numbers
def _stringify_list(self, listt: list):
out_lines = []
all_numbers = all(isinstance(x, Number) for x in listt)
if all_numbers:
return self._stringify_number_list(listt), True
for value in listt:
if isinstance(value, Number):
out_lines.append(f'- {str(value)}')
elif isinstance(value, str):
out_lines.append(f'- "{value}"')
elif isinstance(value, list):
lines, all_numbers = self._stringify_list(value)
if all_numbers:
out_lines.append(f'- {lines[0]}')
out_lines += [f' {line}' for line in lines[1:]]
else:
out_lines.append('-')
out_lines += [f' {line}' for line in lines]
elif isinstance(value, dict):
lines = self._stringify_dict(value)
out_lines.append(f'- {lines[0]}')
out_lines += [f' {line}' for line in lines[1:]]
return out_lines, False
def _stringify_dict(self, dictt: dict):
out_lines = []
for key in dictt:
value = dictt[key]
if isinstance(value, Number):
out_lines.append(f'{key}: {str(value)}')
elif isinstance(value, str):
out_lines.append(f'{key}: "{value}"')
elif isinstance(value, list):
lines, all_numbers = self._stringify_list(value)
if all_numbers:
out_lines.append(f'{key}: {lines[0]}')
out_lines += [f'{" " * (len(key) + 2)}{line}' for line in lines[1:]]
else:
out_lines.append(f'{key}:')
out_lines += [f' {line}' for line in lines]
elif isinstance(value, dict):
lines = self._stringify_dict(value)
out_lines.append(f'{key}:')
out_lines += [f' {line}' for line in lines]
return out_lines
def write(self, output_file: Path, output_dict: dict, output_order: list):
out_lines = [
'%YAML 1.1',
'---',
'version: 1',
# No need to condition this, as libtuning already guarantees that
# we have at least one module. Even if the module has no output,
# its presence is meaningful.
'algorithms:'
]
for module in output_order:
if module not in output_dict:
continue
out_lines.append(f' - {module.out_name}:')
if len(output_dict[module]) == 0:
continue
if not isinstance(output_dict[module], dict):
logger.error(f'Error: Output of {module.type} is not a dictionary')
continue
lines = self._stringify_dict(output_dict[module])
out_lines += [f' {line}' for line in lines]
with open(output_file, 'w', encoding='utf-8') as f:
for line in out_lines:
f.write(f'{line}\n')

View File

@@ -0,0 +1,75 @@
# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
# Gradients that can be used to distribute or map numbers
import libtuning as lt
import math
from numbers import Number
# @brief Gradient for how to allocate pixels to sectors
# @description There are no parameters for the gradients as the domain is the
# number of pixels and the range is the number of sectors, and
# there is only one curve that has a startpoint and endpoint at
# (0, 0) and at (#pixels, #sectors). The exception is for curves
# that *do* have multiple solutions for only two points, such as
# gaussian, and curves of higher polynomial orders if we had them.
#
# \todo There will probably be a helper in the Gradient class, as I have a
# feeling that all the other curves (besides Linear and Gaussian) can be
# implemented in the same way.
class Gradient(object):
def __init__(self):
pass
# @brief Distribute pixels into sectors (only in one dimension)
# @param domain Number of pixels
# @param sectors Number of sectors
# @return A list of number of pixels in each sector
def distribute(self, domain: list, sectors: list) -> list:
raise NotImplementedError
# @brief Map a number on a curve
# @param domain Domain of the curve
# @param rang Range of the curve
# @param x Input on the domain of the curve
# @return y from the range of the curve
def map(self, domain: tuple, rang: tuple, x: Number) -> Number:
raise NotImplementedError
class Linear(Gradient):
# @param remainder Mode of handling remainder
def __init__(self, remainder: lt.Remainder = lt.Remainder.Float):
self.remainder = remainder
def distribute(self, domain: list, sectors: list) -> list:
size = domain / sectors
rem = domain % sectors
if rem == 0:
return [int(size)] * sectors
size = math.ceil(size)
rem = domain % size
output_sectors = [int(size)] * (sectors - 1)
if self.remainder == lt.Remainder.Float:
size = domain / sectors
output_sectors = [size] * sectors
elif self.remainder == lt.Remainder.DistributeFront:
output_sectors.append(int(rem))
elif self.remainder == lt.Remainder.DistributeBack:
output_sectors.insert(0, int(rem))
else:
raise ValueError
return output_sectors
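# Worked example (illustrative): distribute(100, 8) computes
# size = ceil(12.5) = 13 and a remainder of 100 % 13 = 9, so DistributeFront
# yields [13, 13, 13, 13, 13, 13, 13, 9], DistributeBack yields
# [9, 13, 13, 13, 13, 13, 13, 13], and Float simply yields [12.5] * 8.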
def map(self, domain: tuple, rang: tuple, x: Number) -> Number:
m = (rang[1] - rang[0]) / (domain[1] - domain[0])
b = rang[0] - m * domain[0]
return m * x + b

View File

@@ -0,0 +1,140 @@
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# Container for an image and associated metadata
import binascii
import numpy as np
from pathlib import Path
import pyexiv2 as pyexif
import rawpy as raw
import re
import libtuning as lt
import libtuning.utils as utils
import logging
logger = logging.getLogger(__name__)
class Image:
def __init__(self, path: Path):
self.path = path
self.lsc_only = False
self.color = -1
self.lux = -1
self.macbeth = None
try:
self._load_metadata_exif()
except Exception as e:
logger.error(f'Failed to load metadata from {self.path}: {e}')
raise e
try:
self._read_image_dng()
except Exception as e:
logger.error(f'Failed to load image data from {self.path}: {e}')
raise e
@property
def name(self):
return self.path.name
# May raise KeyError as there are too many to check
def _load_metadata_exif(self):
# RawPy doesn't load all the image tags that we need, so we use py3exiv2
metadata = pyexif.ImageMetadata(str(self.path))
metadata.read()
# The DNG and TIFF/EP specifications use different IFDs to store the
# raw image data and the Exif tags. DNG stores them in a SubIFD and in
# an Exif IFD respectively (named "SubImage1" and "Photo" by pyexiv2),
# while TIFF/EP stores them both in IFD0 (name "Image"). Both are used
# in "DNG" files, with libcamera-apps following the DNG recommendation
# and applications based on picamera2 following TIFF/EP.
#
# This code detects which tags are being used, and therefore extracts the
# correct values.
try:
self.w = metadata['Exif.SubImage1.ImageWidth'].value
subimage = 'SubImage1'
photo = 'Photo'
except KeyError:
self.w = metadata['Exif.Image.ImageWidth'].value
subimage = 'Image'
photo = 'Image'
self.pad = 0
self.h = metadata[f'Exif.{subimage}.ImageLength'].value
white = metadata[f'Exif.{subimage}.WhiteLevel'].value
self.sigbits = int(white).bit_length()
self.fmt = (self.sigbits - 4) // 2
self.exposure = int(metadata[f'Exif.{photo}.ExposureTime'].value * 1000000)
self.againQ8 = metadata[f'Exif.{photo}.ISOSpeedRatings'].value * 256 / 100
self.againQ8_norm = self.againQ8 / 256
self.camName = metadata['Exif.Image.Model'].value
self.blacklevel = int(metadata[f'Exif.{subimage}.BlackLevel'].value[0])
self.blacklevel_16 = self.blacklevel << (16 - self.sigbits)
# Channel order depending on bayer pattern
# The key is the order given by exif, where 0 is R, 1 is G, and 2 is B
# The value is the index where the color can be found, where the first
# is R, then G, then G, then B.
bayer_case = {
'0 1 1 2': (lt.Color.R, lt.Color.GR, lt.Color.GB, lt.Color.B),
'1 2 0 1': (lt.Color.GB, lt.Color.B, lt.Color.R, lt.Color.GR),
'2 1 1 0': (lt.Color.B, lt.Color.GB, lt.Color.GR, lt.Color.R),
'1 0 2 1': (lt.Color.GR, lt.Color.R, lt.Color.B, lt.Color.GB)
}
# Note: This needs to be in IFD0
cfa_pattern = metadata[f'Exif.{subimage}.CFAPattern'].value
self.order = bayer_case[cfa_pattern]
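# For example (illustrative): a BGGR sensor reports a CFAPattern of
# '2 1 1 0', which maps to order (B, GB, GR, R) = (3, 2, 1, 0), so
# _read_image_dng() below reshuffles its four channels into R, GR, GB, B.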
def _read_image_dng(self):
raw_im = raw.imread(str(self.path))
raw_data = raw_im.raw_image
shift = 16 - self.sigbits
c0 = np.left_shift(raw_data[0::2, 0::2].astype(np.int64), shift)
c1 = np.left_shift(raw_data[0::2, 1::2].astype(np.int64), shift)
c2 = np.left_shift(raw_data[1::2, 0::2].astype(np.int64), shift)
c3 = np.left_shift(raw_data[1::2, 1::2].astype(np.int64), shift)
self.channels = [c0, c1, c2, c3]
# Reorder the channels into R, GR, GB, B
self.channels = [self.channels[i] for i in self.order]
# \todo Move this to macbeth.py
def get_patches(self, cen_coords, size=16):
saturated = False
# Obtain channel widths and heights
ch_w, ch_h = self.w, self.h
cen_coords = list(np.array((cen_coords[0])).astype(np.int32))
self.cen_coords = cen_coords
# Squares are ordered by stacking macbeth chart columns from left to
# right. Some useful patch indices:
# white = 3
# black = 23
# 'reds' = 9, 10
# 'blues' = 2, 5, 8, 20, 22
# 'greens' = 6, 12, 17
# greyscale = 3, 7, 11, 15, 19, 23
all_patches = []
for ch in self.channels:
ch_patches = []
for cen in cen_coords:
# Macbeth centre is placed at top left of central 2x2 patch to
# account for rounding. Patch pixels are sorted by pixel
# brightness so spatial information is lost.
patch = ch[cen[1] - 7:cen[1] + 9, cen[0] - 7:cen[0] + 9].flatten()
patch.sort()
if patch[-5] == (2**self.sigbits - 1) * 2**(16 - self.sigbits):
saturated = True
ch_patches.append(patch)
all_patches.append(ch_patches)
self.patches = all_patches
return not saturated

View File

@@ -0,0 +1,209 @@
# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
# An infrastructure for camera tuning tools
import argparse
import logging
import libtuning as lt
import libtuning.utils as utils
from enum import Enum, IntEnum
logger = logging.getLogger(__name__)
class Color(IntEnum):
R = 0
GR = 1
GB = 2
B = 3
class Debug(Enum):
Plot = 1
# @brief What to do with the leftover pixels after dividing them into ALSC
# sectors, when the division gradient is uniform
# @var Float Force floating point division so all sectors divide equally
# @var DistributeFront Divide the remainder equally (until running out,
# obviously) into the existing sectors, starting from the front
# @var DistributeBack Same as DistributeFront but starting from the back
class Remainder(Enum):
Float = 0
DistributeFront = 1
DistributeBack = 2
# @brief A helper class to contain a default value for a module configuration
# parameter
class Param(object):
# @var Required The value contained in this instance is irrelevant, and the
# value must be provided by the tuning configuration file.
# @var Optional If the value is not provided by the tuning configuration
# file, then the value contained in this instance will be used instead.
# @var Hardcode The value contained in this instance will be used
class Mode(Enum):
Required = 0
Optional = 1
Hardcode = 2
# @param name Name of the parameter. Shall match the name used in the
# configuration file for the parameter
# @param required Whether or not a value is required in the config
# parameter of get_value()
# @param val Default value (only relevant if mode is Optional)
def __init__(self, name: str, required: Mode, val=None):
self.name = name
self.__required = required
self.val = val
def get_value(self, config: dict):
if self.__required is self.Mode.Hardcode:
return self.val
if self.__required is self.Mode.Required and self.name not in config:
raise ValueError(f'Parameter {self.name} is required but not provided in the configuration')
return config[self.name] if self.name in config else self.val
@property
def required(self):
return self.__required is self.Mode.Required
# @brief Used by libtuning to auto-generate help information for the tuning
# script on the available parameters for the configuration file
# \todo Implement this
@property
def info(self):
raise NotImplementedError
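# Example usage (a minimal sketch; the parameter name and default value are
# made up for illustration):
#
# sectors = lt.Param('sector_count', lt.Param.Mode.Optional, val=16)
# n = sectors.get_value(config.get('general', {}))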
class Tuner(object):
# External functions
def __init__(self, platform_name):
self.name = platform_name
self.modules = []
self.parser = None
self.generator = None
self.output_order = []
self.config = {}
self.output = {}
def add(self, module):
self.modules.append(module)
def set_input_parser(self, parser):
self.parser = parser
def set_output_formatter(self, output):
self.generator = output
def set_output_order(self, modules):
self.output_order = modules
# @brief Convert classes in self.output_order to the instances in self.modules
def _prepare_output_order(self):
output_order = self.output_order
self.output_order = []
for module_type in output_order:
modules = [module for module in self.modules if module.type == module_type.type]
if len(modules) > 1:
logger.error(f'Multiple modules found for module type "{module_type.type}"')
return False
if len(modules) < 1:
logger.error(f'No module found for module type "{module_type.type}"')
return False
self.output_order.append(modules[0])
return True
# \todo Validate parser and generator at Tuner construction time?
def _validate_settings(self):
if self.parser is None:
logger.error('Missing parser')
return False
if self.generator is None:
logger.error('Missing generator')
return False
if len(self.modules) == 0:
logger.error('No modules added')
return False
if len(self.output_order) != len(self.modules):
logger.error('Number of outputs does not match number of modules')
return False
return True
def _process_args(self, argv, platform_name):
parser = argparse.ArgumentParser(description=f'Camera Tuning for {platform_name}')
parser.add_argument('-i', '--input', type=str, required=True,
help='''Directory containing calibration images (required).
Images for ALSC must be named "alsc_{Color Temperature}k_1[u].dng",
and all other images must be named "{Color Temperature}k_{Lux Level}l.dng"''')
parser.add_argument('-o', '--output', type=str, required=True,
help='Output file (required)')
# It is not our duty to scan all modules to figure out their default
# options, so simply return an empty configuration if none is provided.
parser.add_argument('-c', '--config', type=str, default='',
help='Config file (optional)')
# \todo Check if we really need this or if stderr is good enough, or if
# we want a better logging infrastructure with log levels
parser.add_argument('-l', '--log', type=str, default=None,
help='Output log file (optional)')
return parser.parse_args(argv[1:])
def run(self, argv):
args = self._process_args(argv, self.name)
if args is None:
return -1
if not self._validate_settings():
return -1
if not self._prepare_output_order():
return -1
if len(args.config) > 0:
self.config, disable = self.parser.parse(args.config, self.modules)
else:
self.config = {'general': {}}
disable = []
# Remove disabled modules
for module in disable:
if module in self.modules:
self.modules.remove(module)
for module in self.modules:
if not module.validate_config(self.config):
logger.error(f'Config is invalid for module {module.type}')
return -1
has_lsc = any(isinstance(m, lt.modules.lsc.LSC) for m in self.modules)
# Only one LSC module allowed
has_only_lsc = has_lsc and len(self.modules) == 1
images = utils.load_images(args.input, self.config, not has_only_lsc, has_lsc)
if images is None or len(images) == 0:
logger.error('No images were found, or none could be loaded')
return -1
# Do the tuning
for module in self.modules:
out = module.process(self.config, images, self.output)
if out is None:
logger.warning(f'Module {module.hr_name} failed to process...')
continue
self.output[module] = out
self.generator.write(args.output, self.output, self.output_order)
return 0
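# A typical tuning script (a minimal sketch; the module and parser names are
# illustrative) drives this class roughly as follows:
#
# tuner = lt.Tuner('MyPlatform')
# tuner.add(my_lsc_module)
# tuner.set_input_parser(my_parser)
# tuner.set_output_formatter(YamlOutput())
# tuner.set_output_order([my_lsc_module])
# sys.exit(tuner.run(sys.argv))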

View File

@@ -0,0 +1,537 @@
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
# Copyright (C) 2024, Ideas on Board Oy
#
# Locate and extract Macbeth charts from images
# (Copied from: ctt_macbeth_locator.py)
# \todo Add debugging
import cv2
import os
from pathlib import Path
import numpy as np
import warnings
import logging
from sklearn import cluster as cluster
from .ctt_ransac import get_square_verts, get_square_centres
from .image import Image
logger = logging.getLogger(__name__)
class MacbethError(Exception):
pass
# Reshape image to fixed width without distorting returns image and scale
# factor
def reshape(img, width):
factor = width / img.shape[0]
return cv2.resize(img, None, fx=factor, fy=factor), factor
# Correlation function to quantify match
def correlate(im1, im2):
f1 = im1.flatten()
f2 = im2.flatten()
cor = np.corrcoef(f1, f2)
return cor[0][1]
# @brief Compute coordinates of macbeth chart vertices and square centres
# @return (max_cor, best_map_col_norm, fit_coords, success)
#
# Also returns an error/success message for debugging purposes. Additionally,
# it scores the match with a confidence value.
#
# Brief explanation of the macbeth chart locating algorithm:
# - Find rectangles within image
# - Take rectangles within percentage offset of median perimeter. The
# assumption is that these will be the macbeth squares
# - For each potential square, find the 24 possible macbeth centre locations
# that would produce a square in that location
# - Find clusters of potential macbeth chart centres to find the potential
# macbeth centres with the most votes, i.e. the most likely ones
# - For each potential macbeth centre, use the centres of the squares that
# voted for it to find macbeth chart corners
# - For each set of corners, transform the possible match into normalised
# space and correlate with a reference chart to evaluate the match
# - Select the highest correlation as the macbeth chart match, returning the
# correlation as the confidence score
#
# \todo Clean this up
def get_macbeth_chart(img, ref_data):
ref, ref_w, ref_h, ref_corns = ref_data
# The code will raise and catch a MacbethError in case of a problem, trying
# to give some likely reasons why the problem occurred, hence the try/except
try:
# Obtain image, convert to grayscale and normalise
src = img
src, factor = reshape(src, 200)
original = src.copy()
a = 125 / np.average(src)
src_norm = cv2.convertScaleAbs(src, alpha=a, beta=0)
# This code checks if there are separate colour channels. In the past the
# macbeth locator ran on jpgs and this makes it robust to different
# filetypes. Note that running it on a jpg has 4x the pixels of the
# average bayer channel so coordinates must be doubled.
# This is best done in img_load.py in the get_patches method. The
# coordinates and image width, height must be divided by two if the
# macbeth locator has been run on a demosaicked image.
if len(src_norm.shape) == 3:
src_bw = cv2.cvtColor(src_norm, cv2.COLOR_BGR2GRAY)
else:
src_bw = src_norm
original_bw = src_bw.copy()
# Obtain image edges
sigma = 2
src_bw = cv2.GaussianBlur(src_bw, (0, 0), sigma)
t1, t2 = 50, 100
edges = cv2.Canny(src_bw, t1, t2)
# Dilate edges to prevent self-intersections in contours
k_size = 2
kernel = np.ones((k_size, k_size))
its = 1
edges = cv2.dilate(edges, kernel, iterations=its)
# Find contours in image
conts, _ = cv2.findContours(edges, cv2.RETR_TREE,
cv2.CHAIN_APPROX_NONE)
if len(conts) == 0:
raise MacbethError(
'\nWARNING: No macbeth chart found!'
'\nNo contours found in image\n'
'Possible problems:\n'
'- Macbeth chart is too dark or bright\n'
'- Macbeth chart is occluded\n'
)
# Find quadrilateral contours
epsilon = 0.07
conts_per = []
for i in range(len(conts)):
per = cv2.arcLength(conts[i], True)
poly = cv2.approxPolyDP(conts[i], epsilon * per, True)
if len(poly) == 4 and cv2.isContourConvex(poly):
conts_per.append((poly, per))
if len(conts_per) == 0:
raise MacbethError(
'\nWARNING: No macbeth chart found!'
'\nNo quadrilateral contours found'
'\nPossible problems:\n'
'- Macbeth chart is too dark or bright\n'
'- Macbeth chart is occluded\n'
'- Macbeth chart is out of camera plane\n'
)
# Sort contours by perimeter and get perimeters within percent of median
conts_per = sorted(conts_per, key=lambda x: x[1])
med_per = conts_per[int(len(conts_per) / 2)][1]
side = med_per / 4
perc = 0.1
med_low, med_high = med_per * (1 - perc), med_per * (1 + perc)
squares = []
for i in conts_per:
if med_low <= i[1] <= med_high:
squares.append(i[0])
# Obtain coordinates of normalised macbeth and squares
square_verts, mac_norm = get_square_verts(0.06)
# For each square guess, find 24 possible macbeth chart centres
mac_mids = []
squares_raw = []
for i in range(len(squares)):
square = squares[i]
squares_raw.append(square)
# Convert quads to rotated rectangles. This is required as the
# 'squares' are usually quite irregular quadrilaterals, so
# performing a transform would result in exaggerated warping and
# inaccurate macbeth chart centre placement
rect = cv2.minAreaRect(square)
square = cv2.boxPoints(rect).astype(np.float32)
# Reorder vertices to prevent 'hourglass shape'
square = sorted(square, key=lambda x: x[0])
square_1 = sorted(square[:2], key=lambda x: x[1])
square_2 = sorted(square[2:], key=lambda x: -x[1])
square = np.array(np.concatenate((square_1, square_2)), np.float32)
square = np.reshape(square, (4, 2)).astype(np.float32)
squares[i] = square
# Find 24 possible macbeth chart centres by transforming normalised
# macbeth square vertices onto candidate square vertices found in image
for j in range(len(square_verts)):
verts = square_verts[j]
p_mat = cv2.getPerspectiveTransform(verts, square)
mac_guess = cv2.perspectiveTransform(mac_norm, p_mat)
mac_guess = np.round(mac_guess).astype(np.int32)
mac_mid = np.mean(mac_guess, axis=1)
mac_mids.append([mac_mid, (i, j)])
if len(mac_mids) == 0:
raise MacbethError(
'\nWARNING: No macbeth chart found!'
'\nNo possible macbeth charts found within image'
'\nPossible problems:\n'
'- Part of the macbeth chart is outside the image\n'
'- Quadrilaterals in image background\n'
)
# Reshape data
for i in range(len(mac_mids)):
mac_mids[i][0] = mac_mids[i][0][0]
# Find where midpoints cluster to identify most likely macbeth centres
clustering = cluster.AgglomerativeClustering(
n_clusters=None,
compute_full_tree=True,
distance_threshold=side * 2
)
mac_mids_list = [x[0] for x in mac_mids]
if len(mac_mids_list) == 1:
# Special case of only one valid centre found (probably not needed)
clus_list = []
clus_list.append([mac_mids, len(mac_mids)])
else:
clustering.fit(mac_mids_list)
# Create list of all clusters
clus_list = []
if clustering.n_clusters_ > 1:
for i in range(clustering.labels_.max() + 1):
indices = [j for j, x in enumerate(clustering.labels_) if x == i]
clus = []
for index in indices:
clus.append(mac_mids[index])
clus_list.append([clus, len(clus)])
clus_list.sort(key=lambda x: -x[1])
elif clustering.n_clusters_ == 1:
# Special case of only one cluster found
clus_list.append([mac_mids, len(mac_mids)])
else:
raise MacbethError(
'\nWARNING: No macbeth chart found!'
'\nNo clusters found'
'\nPossible problems:\n'
'- NA\n'
)
# Keep only clusters with enough votes
clus_len_max = clus_list[0][1]
clus_tol = 0.7
for i in range(len(clus_list)):
if clus_list[i][1] < clus_len_max * clus_tol:
clus_list = clus_list[:i]
break
cent = np.mean(clus_list[i][0], axis=0)[0]
clus_list[i].append(cent)
# Get centres of each normalised square
reference = get_square_centres(0.06)
# For each possible macbeth chart, transform image into
# normalised space and find correlation with reference
max_cor = 0
best_map = None
best_fit = None
best_cen_fit = None
best_ref_mat = None
for clus in clus_list:
clus = clus[0]
sq_cents = []
ref_cents = []
i_list = [p[1][0] for p in clus]
for point in clus:
i, j = point[1]
# Remove any square that voted for two different points within
# the same cluster. This causes the same point in the image to be
# mapped to two different reference square centres, resulting in
# a very distorted perspective transform since cv2.findHomography
# simply minimises error.
# This phenomenon is not particularly likely to occur due to the
# enforced distance threshold in the clustering fit but it is
# best to keep this in just in case.
if i_list.count(i) == 1:
square = squares_raw[i]
sq_cent = np.mean(square, axis=0)
ref_cent = reference[j]
sq_cents.append(sq_cent)
ref_cents.append(ref_cent)
# At least four squares need to have voted for a centre in
# order for a transform to be found
if len(sq_cents) < 4:
raise MacbethError(
'\nWARNING: No macbeth chart found!'
'\nNot enough squares found'
'\nPossible problems:\n'
'- Macbeth chart is occluded\n'
'- Macbeth chart is too dark or bright\n'
)
ref_cents = np.array(ref_cents)
sq_cents = np.array(sq_cents)
# Find best fit transform from normalised centres to image
h_mat, mask = cv2.findHomography(ref_cents, sq_cents)
if h_mat is None:
raise MacbethError(
'\nERROR: Homography could not be found\n'
)
# Transform normalised corners and centres into image space
mac_fit = cv2.perspectiveTransform(mac_norm, h_mat)
mac_cen_fit = cv2.perspectiveTransform(np.array([reference]), h_mat)
# Transform located corners into reference space
ref_mat = cv2.getPerspectiveTransform(
mac_fit,
np.array([ref_corns])
)
map_to_ref = cv2.warpPerspective(
original_bw, ref_mat,
(ref_w, ref_h)
)
# Normalise brightness
a = 125 / np.average(map_to_ref)
map_to_ref = cv2.convertScaleAbs(map_to_ref, alpha=a, beta=0)
# Find correlation with bw reference macbeth
cor = correlate(map_to_ref, ref)
# Keep only if best correlation
if cor > max_cor:
max_cor = cor
best_map = map_to_ref
best_fit = mac_fit
best_cen_fit = mac_cen_fit
best_ref_mat = ref_mat
# Rotate macbeth by pi and recorrelate in case macbeth chart is
# upside-down
mac_fit_inv = np.array(
([[mac_fit[0][2], mac_fit[0][3],
mac_fit[0][0], mac_fit[0][1]]])
)
mac_cen_fit_inv = np.flip(mac_cen_fit, axis=1)
ref_mat = cv2.getPerspectiveTransform(
mac_fit_inv,
np.array([ref_corns])
)
map_to_ref = cv2.warpPerspective(
original_bw, ref_mat,
(ref_w, ref_h)
)
a = 125 / np.average(map_to_ref)
map_to_ref = cv2.convertScaleAbs(map_to_ref, alpha=a, beta=0)
cor = correlate(map_to_ref, ref)
if cor > max_cor:
max_cor = cor
best_map = map_to_ref
best_fit = mac_fit_inv
best_cen_fit = mac_cen_fit_inv
best_ref_mat = ref_mat
# Check best match is above threshold
cor_thresh = 0.6
if max_cor < cor_thresh:
raise MacbethError(
'\nWARNING: Correlation too low'
'\nPossible problems:\n'
'- Bad lighting conditions\n'
'- Macbeth chart is occluded\n'
'- Background is too noisy\n'
'- Macbeth chart is out of camera plane\n'
)
# Represent coloured macbeth in reference space
best_map_col = cv2.warpPerspective(
original, best_ref_mat, (ref_w, ref_h)
)
best_map_col = cv2.resize(
best_map_col, None, fx=4, fy=4
)
a = 125 / np.average(best_map_col)
best_map_col_norm = cv2.convertScaleAbs(
best_map_col, alpha=a, beta=0
)
# Rescale coordinates to original image size
fit_coords = (best_fit / factor, best_cen_fit / factor)
return (max_cor, best_map_col_norm, fit_coords, True)
# Catch macbeth errors and continue with code
except MacbethError as error:
# \todo: This happens so many times in a normal run that it shadows
# all the relevant output
# logger.warning(error)
return (0, None, None, False)
def find_macbeth(img, mac_config):
small_chart = mac_config['small']
show = mac_config['show']
# Catch the warnings
warnings.simplefilter("ignore")
warnings.warn("runtime", RuntimeWarning)
# A reference macbeth chart is created that will be correlated with the
# located macbeth chart guess to produce a confidence value for the match.
script_dir = Path(os.path.realpath(os.path.dirname(__file__)))
macbeth_ref_path = script_dir.joinpath('macbeth_ref.pgm')
ref = cv2.imread(str(macbeth_ref_path), flags=cv2.IMREAD_GRAYSCALE)
ref_w = 120
ref_h = 80
rc1 = (0, 0)
rc2 = (0, ref_h)
rc3 = (ref_w, ref_h)
rc4 = (ref_w, 0)
ref_corns = np.array((rc1, rc2, rc3, rc4), np.float32)
ref_data = (ref, ref_w, ref_h, ref_corns)
# Locate macbeth chart
cor, mac, coords, ret = get_macbeth_chart(img, ref_data)
# The following bits of code try to fix common problems with simple
# techniques. If at any point the best correlation rises above 0.75,
# nothing more is tried, as that is a high enough confidence to ensure
# reliable macbeth square centre placement.
# Keep a list that includes this image and any brightened-up versions of
# it for reuse.
all_images = [img]
for brightness in [2, 4]:
if cor >= 0.75:
break
img_br = cv2.convertScaleAbs(img, alpha=brightness, beta=0)
all_images.append(img_br)
cor_b, mac_b, coords_b, ret_b = get_macbeth_chart(img_br, ref_data)
if cor_b > cor:
cor, mac, coords, ret = cor_b, mac_b, coords_b, ret_b
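# Illustrative aside (not in the original code): cv2.convertScaleAbs()
# computes saturate(|src * alpha + beta|) as uint8, so alpha values of 2
# and 4 retry the search on progressively brighter copies of the image,
# with highlights clipping at 255.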
# In case the macbeth chart is too small, take a selection of the image
# and attempt to locate the macbeth chart within that. The scale
# increment is root 2.
# These variables will be used to transform coordinates found at smaller
# scales back into the original image. If ii is still -1 after this
# section, then no macbeth chart was found in any subselection.
ii = -1
w_best = 0
h_best = 0
d_best = 100
# d_best records the scale of the best match. Macbeth charts are only
# looked for at one scale increment smaller than the current best match,
# in order to avoid unnecessarily searching for macbeth charts at small
# scales.
# If a macbeth chart has already been found, set d_best to 0.
if cor != 0:
d_best = 0
for index, pair in enumerate([{'sel': 2 / 3, 'inc': 1 / 6},
{'sel': 1 / 2, 'inc': 1 / 8},
{'sel': 1 / 3, 'inc': 1 / 12},
{'sel': 1 / 4, 'inc': 1 / 16}]):
if cor >= 0.75:
break
# Check if we need to look for macbeth charts at even smaller scales.
# This slows the code down significantly and has therefore been omitted
# by default; however, it is not unusably slow, so it might be useful if
# the macbeth chart is too small to be picked up by the current
# subselections. Use this for macbeth charts with side lengths around
# 1/5 of the image dimensions (and smaller...?). It is, however,
# recommended that macbeth charts take up as large a proportion of the
# image as possible.
if index >= 2 and (not small_chart or d_best <= index - 1):
break
w, h = list(img.shape[:2])
# Set dimensions of the subselection and the step along each axis
# between selections
w_sel = int(w * pair['sel'])
h_sel = int(h * pair['sel'])
w_inc = int(w * pair['inc'])
h_inc = int(h * pair['inc'])
loop = int(((1 - pair['sel']) / pair['inc']) + 1)
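# Worked example (for illustration): with sel = 2/3 and inc = 1/6,
# loop = int(((1 - 2/3) / (1/6)) + 1) = 3, i.e. subselections start at
# offsets 0, 1/6 and 1/3 of each image dimension, and each window spans
# 2/3 of that dimension.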
# For each subselection, look for a macbeth chart
for img_br in all_images:
for i in range(loop):
for j in range(loop):
w_s, h_s = i * w_inc, j * h_inc
img_sel = img_br[w_s:w_s + w_sel, h_s:h_s + h_sel]
cor_ij, mac_ij, coords_ij, ret_ij = get_macbeth_chart(img_sel, ref_data)
# If the correlation is better than the best then record the
# scale and current subselection at which macbeth chart was
# found. Also record the coordinates, macbeth chart and message.
if cor_ij > cor:
cor = cor_ij
mac, coords, ret = mac_ij, coords_ij, ret_ij
ii, jj = i, j
w_best, h_best = w_inc, h_inc
d_best = index + 1
# Transform coordinates from subselection to original image
if ii != -1:
for a in range(len(coords)):
for b in range(len(coords[a][0])):
coords[a][0][b][1] += ii * w_best
coords[a][0][b][0] += jj * h_best
if not ret:
return None
coords_fit = coords
if cor < 0.75:
logger.warning(f'Low confidence {cor:.3f} for macbeth chart')
if show:
draw_macbeth_results(img, coords_fit)
return coords_fit
def locate_macbeth(image: Image, config: dict):
# Find macbeth centres
av_chan = (np.mean(np.array(image.channels), axis=0) / (2**16))
av_val = np.mean(av_chan)
if av_val < image.blacklevel_16 / (2**16) + 1 / 64:
logger.warning(f'Image {image.path.name} too dark')
return None
macbeth = find_macbeth(av_chan, config['general']['macbeth'])
if macbeth is None:
logger.warning(f'No macbeth chart found in {image.path.name}')
return None
mac_cen_coords = macbeth[1]
if not image.get_patches(mac_cen_coords):
logger.warning(f'Macbeth patches have saturated in {image.path.name}')
return None
image.macbeth = macbeth
return macbeth

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>

View File

@@ -0,0 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
from libtuning.modules.agc.agc import AGC
from libtuning.modules.agc.rkisp1 import AGCRkISP1

View File

@@ -0,0 +1,21 @@
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
# Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
from ..module import Module
import libtuning as lt
class AGC(Module):
type = 'agc'
hr_name = 'AGC (Base)'
out_name = 'GenericAGC'
# \todo Add sector shapes and stuff just like lsc
def __init__(self, *,
debug: list):
super().__init__()
self.debug = debug

View File

@@ -0,0 +1,79 @@
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
# Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
#
# rkisp1.py - AGC module for tuning rkisp1
from .agc import AGC
import libtuning as lt
class AGCRkISP1(AGC):
hr_name = 'AGC (RkISP1)'
out_name = 'Agc'
def __init__(self, **kwargs):
super().__init__(**kwargs)
# We don't actually need anything from the config file
def validate_config(self, config: dict) -> bool:
return True
def _generate_metering_modes(self) -> dict:
centre_weighted = [
0, 0, 0, 0, 0,
0, 6, 8, 6, 0,
0, 8, 16, 8, 0,
0, 6, 8, 6, 0,
0, 0, 0, 0, 0
]
spot = [
0, 0, 0, 0, 0,
0, 2, 4, 2, 0,
0, 4, 16, 4, 0,
0, 2, 4, 2, 0,
0, 0, 0, 0, 0
]
matrix = [1 for i in range(0, 25)]
return {
'MeteringCentreWeighted': centre_weighted,
'MeteringSpot': spot,
'MeteringMatrix': matrix
}
def _generate_exposure_modes(self) -> dict:
normal = {'shutter': [100, 10000, 30000, 60000, 120000],
'gain': [2.0, 4.0, 6.0, 6.0, 6.0]}
short = {'shutter': [100, 5000, 10000, 20000, 120000],
'gain': [2.0, 4.0, 6.0, 6.0, 6.0]}
return {'ExposureNormal': normal, 'ExposureShort': short}
def _generate_constraint_modes(self) -> dict:
normal = {'lower': {'qLo': 0.98, 'qHi': 1.0, 'yTarget': 0.5}}
highlight = {
'lower': {'qLo': 0.98, 'qHi': 1.0, 'yTarget': 0.5},
'upper': {'qLo': 0.98, 'qHi': 1.0, 'yTarget': 0.8}
}
return {'ConstraintNormal': normal, 'ConstraintHighlight': highlight}
def _generate_y_target(self) -> list:
return 0.5
def process(self, config: dict, images: list, outputs: dict) -> dict:
output = {}
output['AeMeteringMode'] = self._generate_metering_modes()
output['AeExposureMode'] = self._generate_exposure_modes()
output['AeConstraintMode'] = self._generate_constraint_modes()
output['relativeLuminanceTarget'] = self._generate_y_target()
# \todo Debug functionality
return output

View File

@@ -0,0 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
from libtuning.modules.ccm.ccm import CCM
from libtuning.modules.ccm.rkisp1 import CCMRkISP1

View File

@@ -0,0 +1,41 @@
# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
# Copyright (C) 2024, Ideas on Board
#
# Base Ccm tuning module
from ..module import Module
from libtuning.ctt_ccm import ccm
import logging
logger = logging.getLogger(__name__)
class CCM(Module):
type = 'ccm'
hr_name = 'CCM (Base)'
out_name = 'GenericCCM'
def __init__(self, debug: list):
super().__init__()
self.debug = debug
def do_calibration(self, images):
logger.info('Starting CCM calibration')
imgs = [img for img in images if img.macbeth is not None]
# \todo Take LSC calibration results into account.
cal_cr_list = None
cal_cb_list = None
try:
ccms = ccm(imgs, cal_cr_list, cal_cb_list)
except ArithmeticError:
logger.error('CCM calibration failed')
return None
return ccms

View File

@@ -0,0 +1,28 @@
# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
# Copyright (C) 2024, Ideas on Board
#
# Ccm module for tuning rkisp1
from .ccm import CCM
class CCMRkISP1(CCM):
hr_name = 'Crosstalk Correction (RkISP1)'
out_name = 'Ccm'
def __init__(self, **kwargs):
super().__init__(**kwargs)
# We don't need anything from the config file.
def validate_config(self, config: dict) -> bool:
return True
def process(self, config: dict, images: list, outputs: dict) -> dict:
output = {}
ccms = self.do_calibration(images)
output['ccms'] = ccms
return output

View File

@@ -0,0 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
from libtuning.modules.lsc.lsc import LSC
from libtuning.modules.lsc.raspberrypi import ALSCRaspberryPi
from libtuning.modules.lsc.rkisp1 import LSCRkISP1

View File

@@ -0,0 +1,75 @@
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
from ..module import Module
import libtuning as lt
import libtuning.utils as utils
import numpy as np
class LSC(Module):
type = 'lsc'
hr_name = 'LSC (Base)'
out_name = 'GenericLSC'
def __init__(self, *,
debug: list,
sector_shape: tuple,
sector_x_gradient: lt.Gradient,
sector_y_gradient: lt.Gradient,
sector_average_function: lt.Average,
smoothing_function: lt.Smoothing):
super().__init__()
self.debug = debug
self.sector_shape = sector_shape
self.sector_x_gradient = sector_x_gradient
self.sector_y_gradient = sector_y_gradient
self.sector_average_function = sector_average_function
self.smoothing_function = smoothing_function
def _enumerate_lsc_images(self, images):
for image in images:
if image.lsc_only:
yield image
def _get_grid(self, channel, img_w, img_h):
# List of number of pixels in each sector
sectors_x = self.sector_x_gradient.distribute(img_w / 2, self.sector_shape[0])
sectors_y = self.sector_y_gradient.distribute(img_h / 2, self.sector_shape[1])
grid = []
r = 0
for y in sectors_y:
c = 0
for x in sectors_x:
grid.append(self.sector_average_function.average(channel[r:r + y, c:c + x]))
c += x
r += y
return np.array(grid)
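# Worked example (illustrative, assuming a linear gradient): for a
# 256-pixel-wide image the Bayer channel is 128 pixels wide, so
# sector_x_gradient.distribute(128, 4) would yield sector widths such as
# [32, 32, 32, 32], and each grid entry is the average of one sector.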
def _lsc_single_channel(self, channel: np.ndarray,
image: lt.Image, green_grid: np.ndarray = None):
grid = self._get_grid(channel, image.w, image.h)
# Clamp the values to a small positive number, so that the following
# 1 / grid doesn't produce negative results.
grid = np.maximum(grid - image.blacklevel_16, 0.1)
if green_grid is None:
table = np.reshape(1 / grid, self.sector_shape[::-1])
else:
table = np.reshape(green_grid / grid, self.sector_shape[::-1])
table = self.smoothing_function.smoothing(table)
if green_grid is None:
table = table / np.min(table)
return table, grid
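# Worked example (illustrative): if two black-level-corrected green
# sectors average [400, 800], then 1 / grid gives [0.0025, 0.00125],
# and dividing by np.min(table) rescales the gains to [2.0, 1.0], i.e.
# the dimmer sector is boosted 2x relative to the brightest one.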

View File

@@ -0,0 +1,248 @@
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
# ALSC module for tuning Raspberry Pi
from .lsc import LSC
import libtuning as lt
import libtuning.utils as utils
from numbers import Number
import numpy as np
import logging
logger = logging.getLogger(__name__)
class ALSCRaspberryPi(LSC):
# Override the type name so that the parser can match the entry in the
# config file.
type = 'alsc'
hr_name = 'ALSC (Raspberry Pi)'
out_name = 'rpi.alsc'
compatible = ['raspberrypi']
def __init__(self, *,
do_color: lt.Param,
luminance_strength: lt.Param,
**kwargs):
super().__init__(**kwargs)
self.do_color = do_color
self.luminance_strength = luminance_strength
self.output_range = (0, 3.999)
def validate_config(self, config: dict) -> bool:
if self not in config:
logger.error(f'{self.type} not in config')
return False
valid = True
conf = config[self]
lum_key = self.luminance_strength.name
color_key = self.do_color.name
if lum_key not in conf and self.luminance_strength.required:
logger.error(f'{lum_key} is not in config')
valid = False
if lum_key in conf and (conf[lum_key] < 0 or conf[lum_key] > 1):
logger.warning(f'{lum_key} is not in range [0, 1]; defaulting to 0.5')
if color_key not in conf and self.do_color.required:
logger.error(f'{color_key} is not in config')
valid = False
return valid
# @return Image color temperature, flattened array of red calibration table
# (containing {sector size} elements), flattened array of blue
# calibration table, flattened array of green calibration
# table
def _do_single_alsc(self, image: lt.Image, do_alsc_colour: bool):
average_green = np.mean((image.channels[lt.Color.GR:lt.Color.GB + 1]), axis=0)
cg, g = self._lsc_single_channel(average_green, image)
if not do_alsc_colour:
return image.color, None, None, cg.flatten()
cr, _ = self._lsc_single_channel(image.channels[lt.Color.R], image, g)
cb, _ = self._lsc_single_channel(image.channels[lt.Color.B], image, g)
# \todo implement debug
return image.color, cr.flatten(), cb.flatten(), cg.flatten()
# @return Red shading table, Blue shading table, Green shading table,
# number of images processed
def _do_all_alsc(self, images: list, do_alsc_colour: bool, general_conf: dict) -> (list, list, list, int):
# List of colour temperatures
list_col = []
# Associated calibration tables
list_cr = []
list_cb = []
list_cg = []
count = 0
for image in self._enumerate_lsc_images(images):
col, cr, cb, cg = self._do_single_alsc(image, do_alsc_colour)
list_col.append(col)
list_cr.append(cr)
list_cb.append(cb)
list_cg.append(cg)
count += 1
# Convert to numpy array for data manipulation
list_col = np.array(list_col)
list_cr = np.array(list_cr)
list_cb = np.array(list_cb)
list_cg = np.array(list_cg)
cal_cr_list = []
cal_cb_list = []
# Note: The calculation of average corners and centre of the shading
# tables that ctt performed has been removed, as it was unused.
# Average all values for luminance shading and return one table for all temperatures
lum_lut = list(np.round(np.mean(list_cg, axis=0), 3))
if not do_alsc_colour:
return None, None, lum_lut, count
for ct in sorted(set(list_col)):
# Average tables for the same colour temperature
indices = np.where(list_col == ct)
ct = int(ct)
t_r = np.round(np.mean(list_cr[indices], axis=0), 3)
t_b = np.round(np.mean(list_cb[indices], axis=0), 3)
cr_dict = {
'ct': ct,
'table': list(t_r)
}
cb_dict = {
'ct': ct,
'table': list(t_b)
}
cal_cr_list.append(cr_dict)
cal_cb_list.append(cb_dict)
return cal_cr_list, cal_cb_list, lum_lut, count
# @brief Calculate sigma from two adjacent gain tables
def _calcSigma(self, g1, g2):
g1 = np.reshape(g1, self.sector_shape[::-1])
g2 = np.reshape(g2, self.sector_shape[::-1])
# Apply gains to gain table
gg = g1 / g2
if np.mean(gg) < 1:
gg = 1 / gg
# For each internal patch, compute average difference between it and
# its 4 neighbours, then append to list
diffs = []
for i in range(self.sector_shape[1] - 2):
for j in range(self.sector_shape[0] - 2):
# Indexing is incremented by 1 since all patches on borders are
# not counted
diff = np.abs(gg[i + 1][j + 1] - gg[i][j + 1])
diff += np.abs(gg[i + 1][j + 1] - gg[i + 2][j + 1])
diff += np.abs(gg[i + 1][j + 1] - gg[i + 1][j])
diff += np.abs(gg[i + 1][j + 1] - gg[i + 1][j + 2])
diffs.append(diff / 4)
mean_diff = np.mean(diffs)
return np.round(mean_diff, 5)
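# Worked example (illustrative): with sector_shape (4, 3) there are
# (3 - 2) * (4 - 2) = 2 interior patches; for each, the absolute
# difference of gg against its four neighbours is averaged, and the
# returned sigma is the mean of those per-patch averages.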
# @brief Obtains sigmas for red and blue, effectively a measure of the
# 'error'
def _get_sigma(self, cal_cr_list, cal_cb_list):
# Provided that the colour alsc tables were generated for two different
# colour temperatures, sigma is calculated by comparing two calibration
# tables adjacent in colour temperature.
color_temps = [cal['ct'] for cal in cal_cr_list]
# Calculate sigmas for each adjacent color_temps and return worst one
sigma_rs = []
sigma_bs = []
for i in range(len(color_temps) - 1):
sigma_rs.append(self._calcSigma(cal_cr_list[i]['table'], cal_cr_list[i + 1]['table']))
sigma_bs.append(self._calcSigma(cal_cb_list[i]['table'], cal_cb_list[i + 1]['table']))
# Return maximum sigmas, not necessarily from the same colour
# temperature interval
sigma_r = max(sigma_rs) if sigma_rs else 0.005
sigma_b = max(sigma_bs) if sigma_bs else 0.005
return sigma_r, sigma_b
def process(self, config: dict, images: list, outputs: dict) -> dict:
output = {
'omega': 1.3,
'n_iter': 100,
'luminance_strength': 0.7
}
conf = config[self]
general_conf = config['general']
do_alsc_colour = self.do_color.get_value(conf)
# \todo I have no idea where this input parameter is used
luminance_strength = self.luminance_strength.get_value(conf)
if luminance_strength < 0 or luminance_strength > 1:
luminance_strength = 0.5
output['luminance_strength'] = luminance_strength
# \todo Validate images from greyscale camera and force greyscale mode
# \todo Debug functionality
alsc_out = self._do_all_alsc(images, do_alsc_colour, general_conf)
# \todo Handle the second green lut
cal_cr_list, cal_cb_list, luminance_lut, count = alsc_out
if not do_alsc_colour:
output['luminance_lut'] = luminance_lut
output['n_iter'] = 0
return output
output['calibrations_Cr'] = cal_cr_list
output['calibrations_Cb'] = cal_cb_list
output['luminance_lut'] = luminance_lut
# The sigmas determine the strength of the adaptive algorithm, which
# cleans up any lens shading that has slipped through the alsc. These
# are determined by measuring a 'worst-case' difference between two
# alsc tables that are adjacent in colour space. If, however, only one
# colour temperature has been provided, then this difference cannot be
# computed, as only one table is available.
# To determine the sigmas you would have to estimate the error of an
# alsc table with only the image it was taken on as a check. To avoid
# this circularity, default exaggerated sigmas are used, which can
# result in too much alsc and is therefore not advised.
# In general, just take another alsc picture at another colour
# temperature!
if count == 1:
output['sigma'] = 0.005
output['sigma_Cb'] = 0.005
logger.warning('Only one alsc calibration found; standard sigmas used for adaptive algorithm.')
return output
# Obtain worst-case scenario residual sigmas
sigma_r, sigma_b = self._get_sigma(cal_cr_list, cal_cb_list)
output['sigma'] = np.round(sigma_r, 5)
output['sigma_Cb'] = np.round(sigma_b, 5)
return output

View File

@@ -0,0 +1,116 @@
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
# LSC module for tuning rkisp1
from .lsc import LSC
import libtuning as lt
import libtuning.utils as utils
from numbers import Number
import numpy as np
class LSCRkISP1(LSC):
hr_name = 'LSC (RkISP1)'
out_name = 'LensShadingCorrection'
# \todo Not sure if this is useful. Probably will remove later.
compatible = ['rkisp1']
def __init__(self, *args, **kwargs):
super().__init__(**kwargs)
# We don't actually need anything from the config file
def validate_config(self, config: dict) -> bool:
return True
# @return Image color temperature, flattened array of red calibration table
# (containing {sector size} elements), flattened array of blue
# calibration table, flattened array of (red's) green calibration
# table, flattened array of (blue's) green calibration table
def _do_single_lsc(self, image: lt.Image):
# Perform LSC on each colour channel independently. A future enhancement
# worth investigating would be splitting the luminance and chrominance
# LSC as done by Raspberry Pi.
cgr, _ = self._lsc_single_channel(image.channels[lt.Color.GR], image)
cgb, _ = self._lsc_single_channel(image.channels[lt.Color.GB], image)
cr, _ = self._lsc_single_channel(image.channels[lt.Color.R], image)
cb, _ = self._lsc_single_channel(image.channels[lt.Color.B], image)
return image.color, cr.flatten(), cb.flatten(), cgr.flatten(), cgb.flatten()
# @return List of dictionaries of color temperature, red table, red's green
# table, blue's green table, and blue table
def _do_all_lsc(self, images: list) -> list:
output_list = []
output_map_func = lt.gradient.Linear().map
# List of colour temperatures
list_col = []
# Associated calibration tables
list_cr = []
list_cb = []
list_cgr = []
list_cgb = []
for image in self._enumerate_lsc_images(images):
col, cr, cb, cgr, cgb = self._do_single_lsc(image)
list_col.append(col)
list_cr.append(cr)
list_cb.append(cb)
list_cgr.append(cgr)
list_cgb.append(cgb)
# Convert to numpy array for data manipulation
list_col = np.array(list_col)
list_cr = np.array(list_cr)
list_cb = np.array(list_cb)
list_cgr = np.array(list_cgr)
list_cgb = np.array(list_cgb)
for color_temperature in sorted(set(list_col)):
# Average tables for the same colour temperature
indices = np.where(list_col == color_temperature)
color_temperature = int(color_temperature)
tables = []
for lis in [list_cr, list_cgr, list_cgb, list_cb]:
table = np.mean(lis[indices], axis=0)
table = output_map_func((1, 4), (1024, 4096), table)
table = np.clip(table, 1024, 4095)
table = np.round(table).astype('int32').tolist()
tables.append(table)
entry = {
'ct': color_temperature,
'r': tables[0],
'gr': tables[1],
'gb': tables[2],
'b': tables[3],
}
output_list.append(entry)
return output_list
def process(self, config: dict, images: list, outputs: dict) -> dict:
output = {}
# \todo This should actually come from self.sector_{x,y}_gradient
size_gradient = lt.gradient.Linear(lt.Remainder.Float)
output['x-size'] = size_gradient.distribute(0.5, 8)
output['y-size'] = size_gradient.distribute(0.5, 8)
output['sets'] = self._do_all_lsc(images)
if len(output['sets']) == 0:
return None
# \todo Validate images from greyscale camera and force greyscale mode
# \todo Debug functionality
return output

View File

@@ -0,0 +1,32 @@
# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
# Base class for algorithm-specific tuning modules
# @var type Type of the module. Defined in the base module.
# @var out_name The key that will be used for the algorithm in the algorithms
# dictionary in the tuning output file
# @var hr_name Human-readable module name, mostly for debugging
class Module(object):
type = 'base'
hr_name = 'Base Module'
out_name = 'GenericAlgorithm'
def __init__(self):
pass
def validate_config(self, config: dict) -> bool:
raise NotImplementedError
# @brief Do the module's processing
# @param config Full configuration from the input configuration file
# @param images List of images to process
# @param outputs The outputs of all modules that were executed before this
# module. Note that this is an input parameter, and the
# output of this module should be returned directly
# @return Result of the module's processing. It may be empty. None
# indicates failure and that the result should not be used.
def process(self, config: dict, images: list, outputs: dict) -> dict:
raise NotImplementedError
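# A minimal hypothetical subclass, to illustrate the contract (names and
# values below are examples only, not an existing module):
#
#   class ExampleModule(Module):
#       type = 'example'
#       hr_name = 'Example (Base)'
#       out_name = 'GenericExample'
#
#       def validate_config(self, config: dict) -> bool:
#           return True
#
#       def process(self, config: dict, images: list, outputs: dict) -> dict:
#           return {'strength': 1.0}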

View File

@@ -0,0 +1,24 @@
# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2024, Ideas on Board
#
# Module implementation for static data
from .module import Module
# This module can be used in cases where the tuning file should contain
# static data.
class StaticModule(Module):
def __init__(self, out_name: str, output: dict = {}):
super().__init__()
self.out_name = out_name
self.hr_name = f'Static {out_name}'
self.type = f'static_{out_name}'
self.output = output
def validate_config(self, config: dict) -> bool:
return True
def process(self, config: dict, images: list, outputs: dict) -> dict:
return self.output
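# Example usage (hypothetical key and values): emit a fixed tuning file
# entry without writing a dedicated module:
#
#   blc = StaticModule('BlackLevelCorrection',
#                      {'R': 256, 'Gr': 256, 'Gb': 256, 'B': 256})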

View File

@@ -0,0 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
from libtuning.parsers.raspberrypi_parser import RaspberryPiParser
from libtuning.parsers.yaml_parser import YamlParser

View File

@@ -0,0 +1,21 @@
# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
# Base class for a parser for a specific format of config file
class Parser(object):
def __init__(self):
pass
# @brief Parse a config file into a config dict
# @details The config dict shall have one key 'general' with a dict value
# for general configuration options, and all other entries shall
# have the module as the key with its configuration options (as a
# dict) as the value. The config dict shall prune entries that are
# for modules that are not in @a modules.
# @param config (str) Path to config file
# @param modules (list) List of modules
# @return (dict, list) Configuration and list of modules to disable
def parse(self, config_file: str, modules: list) -> (dict, list):
raise NotImplementedError
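# For illustration, a parsed config dict following the contract above is
# assumed to look like:
#
#   {
#       'general': {'blacklevel': 64},
#       <module instance>: {'some_option': 'some_value'},
#   }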

View File

@@ -0,0 +1,93 @@
# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
# Parser for Raspberry Pi config file format
from .parser import Parser
import json
import numbers
import libtuning.utils as utils
class RaspberryPiParser(Parser):
def __init__(self):
super().__init__()
# The strings in the 'disable' and 'plot' lists are formatted as
# 'rpi.{module_name}'.
# @brief Enumerate the entries of @a listt as modules, when the entry
# exists in @a dictt and names a valid module in @a modules
def _enumerate_rpi_modules(self, listt, dictt, modules):
for x in listt:
name = x.replace('rpi.', '')
if name not in dictt:
continue
module = utils.get_module_by_type_name(modules, name)
if module is not None:
yield module
def _valid_macbeth_option(self, value):
if not isinstance(value, dict):
return False
if list(value.keys()) != ['small', 'show']:
return False
for val in value.values():
if not isinstance(val, numbers.Number):
return False
return True
def parse(self, config_file: str, modules: list) -> (dict, list):
with open(config_file, 'r') as config_json:
config = json.load(config_json)
disable = []
for module in self._enumerate_rpi_modules(config['disable'], config, modules):
disable.append(module)
# Remove the disabled module's config too
config.pop(module.type)
config.pop('disable')
# The raspberrypi config format has 'plot' map to a list of module
# names which should be plotted. libtuning has each module contain the
# plot information in itself so do this conversion.
for module in self._enumerate_rpi_modules(config['plot'], config, modules):
# It's fine to set the value of a potentially disabled module, as
# the object still exists at this point
module.appendValue('debug', 'plot')
config.pop('plot')
# Convert the keys from module name to module instance
new_config = {}
for module_name in config:
module = utils.get_module_by_type_name(modules, module_name)
if module is not None:
new_config[module] = config[module_name]
new_config['general'] = {}
if 'blacklevel' in config:
if not isinstance(config['blacklevel'], numbers.Number):
raise TypeError('Config "blacklevel" must be a number')
# Raspberry Pi's ctt config has magic blacklevel value -1 to mean
# "get it from the image metadata". Since we already do that in
# Image, don't save it to the config here.
if config['blacklevel'] >= 0:
new_config['general']['blacklevel'] = config['blacklevel']
if 'macbeth' in config:
if not self._valid_macbeth_option(config['macbeth']):
raise TypeError('Config "macbeth" must be a dict: {"small": number, "show": number}')
new_config['general']['macbeth'] = config['macbeth']
else:
new_config['general']['macbeth'] = {'small': 0, 'show': 0}
return new_config, disable
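# A minimal assumed example of the input format (module options are
# hypothetical):
#
#   {
#       "disable": ["rpi.ccm"],
#       "plot": ["rpi.alsc"],
#       "blacklevel": -1,
#       "macbeth": {"small": 0, "show": 0},
#       "alsc": {"luminance_strength": 0.7},
#       "ccm": {}
#   }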

View File

@@ -0,0 +1,20 @@
# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
# Parser for YAML format config file
from .parser import Parser
import yaml
class YamlParser(Parser):
def __init__(self):
super().__init__()
def parse(self, config_file: str, modules: list) -> (dict, list):
# Dummy implementation that just reads the file
with open(config_file, 'r') as f:
config = yaml.safe_load(f)
return config, []

View File

@@ -0,0 +1,24 @@
# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
# Wrapper for cv2 smoothing functions to enable duck-typing
import cv2
# @brief Wrapper for cv2 smoothing functions so that they can be duck-typed
class Smoothing(object):
def __init__(self):
pass
def smoothing(self, src):
raise NotImplementedError
class MedianBlur(Smoothing):
def __init__(self, ksize):
self.ksize = ksize
def smoothing(self, src):
return cv2.medianBlur(src.astype('float32'), self.ksize).astype('float64')
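# Example usage (illustrative): cv2.medianBlur() requires an odd kernel
# size, so MedianBlur(3).smoothing(table) applies a 3x3 median filter to
# a gain table.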

View File

@@ -0,0 +1,186 @@
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
# Utilities for libtuning
import cv2
import decimal
import math
import numpy as np
import os
from pathlib import Path
import re
import sys
import logging
import libtuning as lt
from libtuning.image import Image
from .macbeth import locate_macbeth
logger = logging.getLogger(__name__)
# Utility functions
def get_module_by_type_name(modules, name):
for module in modules:
if module.type == name:
return module
return None
# Private utility functions
def _list_image_files(directory):
d = Path(directory)
files = [d.joinpath(f) for f in os.listdir(d)
if re.search(r'\.(jpe?g|dng)$', f)]
files.sort()
return files
def _parse_image_filename(fn: Path):
lsc_only = False
color_temperature = None
lux = None
parts = fn.stem.split('_')
for part in parts:
if part == 'alsc':
lsc_only = True
continue
r = re.match(r'(\d+)[kK]', part)
if r:
color_temperature = int(r.group(1))
continue
r = re.match(r'(\d+)[lLuU]', part)
if r:
lux = int(r.group(1))
if color_temperature is None:
logger.error(f'The file name of "{fn.name}" does not contain a color temperature')
if lux is None and lsc_only is False:
logger.error(f'The file name of "{fn.name}" must either contain alsc or a lux level')
return color_temperature, lux, lsc_only
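# Illustrative filenames: 'br_3000k_500l.dng' parses to
# (3000, 500, False), while 'alsc_5000k.dng' parses to (5000, None, True).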
# \todo Implement this from check_imgs() in ctt.py
def _validate_images(images):
return True
# Public utility functions
# @brief Load images into a single list of Image instances
# @param input_dir Directory from which to load image files
# @param config Configuration dictionary
# @param load_nonlsc Whether or not to load non-lsc images
# @param load_lsc Whether or not to load lsc-only images
# @return A list of Image instances
def load_images(input_dir: str, config: dict, load_nonlsc: bool, load_lsc: bool) -> list:
files = _list_image_files(input_dir)
if len(files) == 0:
logger.error(f'No images found in {input_dir}')
return None
images = []
for f in files:
color, lux, lsc_only = _parse_image_filename(f)
if color is None:
logger.warning(f'Ignoring "{f.name}" as it has no associated color temperature')
continue
logger.info(f'Process image "{f.name}" (color={color}, lux={lux}, lsc_only={lsc_only})')
# Skip lsc image if we don't need it
if lsc_only and not load_lsc:
logger.warning(f'Skipping {f.name} as this tuner has no LSC module')
continue
# Skip non-lsc image if we don't need it
if not lsc_only and not load_nonlsc:
logger.warning(f'Skipping {f.name} as this tuner only has an LSC module')
continue
# Load image
try:
image = Image(f)
except Exception as e:
logger.error(f'Failed to load image {f.name}: {e}')
continue
# Populate simple fields
image.lsc_only = lsc_only
image.color = color
image.lux = lux
# Black level comes from the TIFF tags, but they are overridable by the
# config file.
if 'blacklevel' in config['general']:
image.blacklevel_16 = config['general']['blacklevel']
if lsc_only:
images.append(image)
continue
# Handle macbeth
macbeth = locate_macbeth(image, config)
if macbeth is None:
continue
images.append(image)
if not _validate_images(images):
return None
return images
"""
Some code that will save virtual macbeth charts that show the difference between optimised matrices and non optimised matrices
The function creates an image that is 1550 by 1050 pixels wide, and fills it with patches which are 200x200 pixels in size
Each patch contains the ideal color, the color from the original matrix, and the color from the final matrix
_________________
| |
| Ideal Color |
|_______________|
| Old | new |
| Color | Color |
|_______|_______|
Nice way of showing how the optimisation helps change the colors and the color matricies
"""
def visualise_macbeth_chart(macbeth_rgb, original_rgb, new_rgb, output_filename):
image = np.zeros((1050, 1550, 3), dtype=np.uint8)
colorindex = -1
for y in range(6):
for x in range(4): # Creates 6 x 4 grid of macbeth chart
colorindex += 1
xlocation = 50 + 250 * x # Means there is 50px of black gap between each square, more like the real macbeth chart.
ylocation = 50 + 250 * y
for g in range(200):
for i in range(100):
image[xlocation + i, ylocation + g] = macbeth_rgb[colorindex]
xlocation = 150 + 250 * x
ylocation = 50 + 250 * y
for i in range(100):
for g in range(100):
image[xlocation + i, ylocation + g] = original_rgb[colorindex] # Smaller squares below to compare the old colors with the new ones
xlocation = 150 + 250 * x
ylocation = 150 + 250 * y
for i in range(100):
for g in range(100):
image[xlocation + i, ylocation + g] = new_rgb[colorindex]
im_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
cv2.imwrite(f'{output_filename} Generated Macbeth Chart.png', im_bgr)