An attempt at getting image data back

commit 6452d2e774 (parent e026bc93f7)
Date: 2024-07-14 00:27:33 +02:00
1314 changed files with 218350 additions and 38 deletions


@@ -0,0 +1,42 @@
#!/usr/bin/env python3
#
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2022, Raspberry Pi Ltd
#
# alsc tuning tool
import sys
from ctt import *
from ctt_tools import parse_input
if __name__ == '__main__':
"""
initialise calibration
"""
if len(sys.argv) == 1:
print("""
PiSP Lens Shading Camera Tuning Tool version 1.0
Required Arguments:
'-i' : Calibration image directory.
'-o' : Name of output json file.
Optional Arguments:
'-t' : Target platform - 'pisp' or 'vc4'. Default 'vc4'
'-c' : Config file for the CTT. If not passed, default parameters used.
'-l' : Name of output log file. If not passed, 'ctt_log.txt' used.
""")
quit(0)
else:
"""
parse input arguments
"""
json_output, directory, config, log_output, target = parse_input()
if target == 'pisp':
from ctt_pisp import json_template, grid_size
elif target == 'vc4':
from ctt_vc4 import json_template, grid_size
run_ctt(json_output, directory, config, log_output, json_template, grid_size, target, alsc_only=True)
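# Example invocation (hypothetical paths, and assuming this script is saved as alsc_only.py;
# '-t pisp' selects the PiSP template):
#   ./alsc_only.py -i alsc_images/ -o alsc_tuning.json -t pisp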


@@ -0,0 +1,142 @@
#!/usr/bin/env python3
#
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2023, Raspberry Pi (Trading) Ltd.
#
# cac_only.py - cac tuning tool
# This file allows you to tune only the chromatic aberration correction.
# Specify any number of files in the command line args, and it shall iterate through
# and generate an averaged cac table from all the input images, which you can then
# paste into your tuning file.
# Takes .dng files of the dots grid produced by the camera modules and calculates the chromatic aberration of each dot.
# Then takes each dot, works out where it was in the image, and uses that to output tables of the shifts
# across the whole image.
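# Example invocation (hypothetical file names; bare .dng arguments are treated as inputs):
#   ./cac_only.py dots_0deg.dng dots_90deg.dng -o cac.json -p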
from PIL import Image
import numpy as np
import rawpy
import sys
import getopt
from ctt_cac import *
def cac(filelist, output_filepath, plot_results=False):
np.set_printoptions(precision=3)
np.set_printoptions(suppress=True)
# Create arrays to hold all the dots data and their colour offsets
red_shift = [] # Format is: [[Dot Center X, Dot Center Y, x shift, y shift]]
blue_shift = []
# Iterate through the files
# Multiple files are recommended, to average out the lens aberration through rotations
for file in filelist:
print("\n Processing file " + str(file))
# Read the raw RGB values from the .dng file
with rawpy.imread(file) as raw:
rgb = raw.postprocess()
sizes = (raw.sizes)
image_size = [sizes[2], sizes[3]] # Image size, X, Y
# Create a colour copy of the RGB values to use later in the calibration
imout = Image.new(mode="RGB", size=image_size)
rgb_image = np.array(imout)
# Make sure the rgb values are shaped as a 3d array before further processing
# (note: reshape returns a new array, so the result must be assigned)
rgb_image = rgb.reshape((image_size[0], image_size[1], 3))
# Pass the RGB image through to the dots locating program
# Returns an array of the dots (colour rectangles around the dots), and an array of their locations
print("Finding dots")
dots, dots_locations = find_dots_locations(rgb_image)
# Now, analyse each dot. Work out the centroid of each colour channel, and use that to work out
# by how far the chromatic aberration has shifted each channel
print('Dots found: ' + str(len(dots)))
for dot, dot_location in zip(dots, dots_locations):
if len(dot) > 0:
if (dot_location[0] > 0) and (dot_location[1] > 0):
ret = analyse_dot(dot, dot_location)
red_shift.append(ret[0])
blue_shift.append(ret[1])
# Take our arrays of red shifts and locations, push them through to be interpolated into a 9x9 matrix
# for the CAC block to handle and then store these as a .json file to be added to the camera
# tuning file
print("\nCreating output grid")
rx, ry, bx, by = shifts_to_yaml(red_shift, blue_shift, image_size)
print("CAC correction complete!")
# The json format that we then paste into the tuning file (manually)
sample = '''
{
"rpi.cac" :
{
"strength": 1.0,
"lut_rx" : [
rx_vals
],
"lut_ry" : [
ry_vals
],
"lut_bx" : [
bx_vals
],
"lut_by" : [
by_vals
]
}
}
'''
# The lines below may look incorrect, but the PiSP (standard) dimensions are flipped relative to
# PIL image coordinate directions, hence xr -> yr. Also, the shifts calculated are colour shifts,
# whereas the PiSP block asks for the shift it should apply (hence the * -1 to convert a colour shift into a pixel shift)
sample = sample.replace("rx_vals", pprint_array(ry * -1))
sample = sample.replace("ry_vals", pprint_array(rx * -1))
sample = sample.replace("bx_vals", pprint_array(by * -1))
sample = sample.replace("by_vals", pprint_array(bx * -1))
print("Successfully converted to JSON")
f = open(str(output_filepath), "w+")
f.write(sample)
f.close()
print("Successfully written to json file")
'''
If you wish to see a plot of the colour channel shifts, add the -p or --plots option
Can be a quick way of validating if the data/dots you've got are good, or if you need to
change some parameters/take some better images
'''
if plot_results:
plot_shifts(red_shift, blue_shift)
if __name__ == "__main__":
argv = sys.argv
# Detect the input and output file paths
arg_output = "output.json"
arg_help = "{0} -i <input>... -o <output> [-p]".format(argv[0])
filelist = []
plot_results = False
# Parse the options with getopt; leftover bare .dng arguments are also accepted as inputs
opts, args = getopt.getopt(argv[1:], "hi:o:p", ["help", "input=", "output=", "plot"])
for opt, arg in opts:
if opt in ("-h", "--help"):
print(arg_help)  # print the help message
sys.exit(2)
elif opt in ("-i", "--input"):
filelist.append(arg)
elif opt in ("-o", "--output"):
arg_output = arg
elif opt in ("-p", "--plot"):
plot_results = True
filelist += [a for a in args if a.endswith('.dng')]
cac(filelist, arg_output, plot_results)


@@ -0,0 +1,30 @@
# Program to convert from RGB to LAB color space
def RGB_to_LAB(RGB):  # where RGB is a 1x3 array, e.g. RGB = [100, 255, 230]
# convert R, G, B to X, Y, Z (sRGB D65 matrix)
X = RGB[0] * 0.4124 + RGB[1] * 0.3576 + RGB[2] * 0.1805
Y = RGB[0] * 0.2126 + RGB[1] * 0.7152 + RGB[2] * 0.0722
Z = RGB[0] * 0.0193 + RGB[1] * 0.1192 + RGB[2] * 0.9505
# XYZ must be in range 0 -> 100, so scale down from 255
XYZ = [X / 255 * 100, Y / 255 * 100, Z / 255 * 100]
XYZ[0] = XYZ[0] / 95.047  # ref_X = 95.047, Observer = 2°, Illuminant = D65
XYZ[1] = XYZ[1] / 100.0  # ref_Y = 100.000
XYZ[2] = XYZ[2] / 108.883  # ref_Z = 108.883
for num, value in enumerate(XYZ):
if value > 0.008856:
value = value ** (1 / 3)
else:
value = (7.787 * value) + (16 / 116)
XYZ[num] = value
# L, a, b values calculated below
L = (116 * XYZ[1]) - 16
a = 500 * (XYZ[0] - XYZ[1])
b = 200 * (XYZ[1] - XYZ[2])
return [L, a, b]
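# Quick sanity check (values approximate): pure white maps to maximum lightness
# with no colour component, RGB_to_LAB([255, 255, 255]) ≈ [100.0, 0.0, 0.0]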


@@ -0,0 +1,120 @@
#!/usr/bin/env python3
#
# SPDX-License-Identifier: BSD-2-Clause
#
# Script to convert version 1.0 Raspberry Pi camera tuning files to version 2.0.
#
# Copyright 2022 Raspberry Pi Ltd
import argparse
import json
import numpy as np
import sys
from ctt_pretty_print_json import pretty_print
from ctt_pisp import grid_size as grid_size_pisp
from ctt_pisp import json_template as json_template_pisp
from ctt_vc4 import grid_size as grid_size_vc4
from ctt_vc4 import json_template as json_template_vc4
def interp_2d(in_ls, src_w, src_h, dst_w, dst_h):
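# Separable resample: the first pass linearly stretches each source row to the
# destination width; the second pass stretches each destination column to the
# destination height, reading back the src_h rows filled by the first pass.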
out_ls = np.zeros((dst_h, dst_w))
for i in range(src_h):
out_ls[i] = np.interp(np.linspace(0, dst_w - 1, dst_w),
np.linspace(0, dst_w - 1, src_w),
in_ls[i])
for i in range(dst_w):
out_ls[:,i] = np.interp(np.linspace(0, dst_h - 1, dst_h),
np.linspace(0, dst_h - 1, src_h),
out_ls[:src_h, i])
return out_ls
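# A quick sanity check (both passes are plain linear interpolation):
# interp_2d([[0, 1], [2, 3]], 2, 2, 3, 3)
# -> [[0.0, 0.5, 1.0], [1.0, 1.5, 2.0], [2.0, 2.5, 3.0]]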
def convert_target(in_json: dict, target: str):
src_w, src_h = grid_size_pisp if target == 'vc4' else grid_size_vc4
dst_w, dst_h = grid_size_vc4 if target == 'vc4' else grid_size_pisp
json_template = json_template_vc4 if target == 'vc4' else json_template_pisp
# ALSC grid sizes
alsc = next(algo for algo in in_json['algorithms'] if 'rpi.alsc' in algo)['rpi.alsc']
for colour in ['calibrations_Cr', 'calibrations_Cb']:
if colour not in alsc:
continue
for temperature in alsc[colour]:
in_ls = np.reshape(temperature['table'], (src_h, src_w))
out_ls = interp_2d(in_ls, src_w, src_h, dst_w, dst_h)
temperature['table'] = np.round(out_ls.flatten(), 3).tolist()
if 'luminance_lut' in alsc:
in_ls = np.reshape(alsc['luminance_lut'], (src_h, src_w))
out_ls = interp_2d(in_ls, src_w, src_h, dst_w, dst_h)
alsc['luminance_lut'] = np.round(out_ls.flatten(), 3).tolist()
# Denoise blocks
for i, algo in enumerate(in_json['algorithms']):
if list(algo.keys())[0] == 'rpi.sdn':
in_json['algorithms'][i] = {'rpi.denoise': json_template['rpi.sdn'] if target == 'vc4' else json_template['rpi.denoise']}
break
# AGC mode weights
agc = next(algo for algo in in_json['algorithms'] if 'rpi.agc' in algo)['rpi.agc']
if 'channels' in agc:
for i, channel in enumerate(agc['channels']):
target_agc_metering = json_template['rpi.agc']['channels'][i]['metering_modes']
for mode, v in channel['metering_modes'].items():
v['weights'] = target_agc_metering[mode]['weights']
else:
for mode, v in agc["metering_modes"].items():
target_agc_metering = json_template['rpi.agc']['channels'][0]['metering_modes']
v['weights'] = target_agc_metering[mode]['weights']
# HDR
if target == 'pisp':
for i, algo in enumerate(in_json['algorithms']):
if list(algo.keys())[0] == 'rpi.hdr':
in_json['algorithms'][i] = {'rpi.hdr': json_template['rpi.hdr']}
return in_json
def convert_v2(in_json: dict, target: str) -> str:
if 'version' in in_json.keys() and in_json['version'] == 1.0:
converted = {
'version': 2.0,
'target': target,
'algorithms': [{algo: config} for algo, config in in_json.items()]
}
else:
converted = in_json
# Convert between vc4 <-> pisp targets. This is a best effort thing.
if converted['target'] != target:
converted = convert_target(converted, target)
converted['target'] = target
grid_size = grid_size_vc4[0] if target == 'vc4' else grid_size_pisp[0]
return pretty_print(converted, custom_elems={'table': grid_size, 'luminance_lut': grid_size})
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter, description=
'Convert the format of the Raspberry Pi camera tuning file from v1.0 to v2.0 and/or the vc4 <-> pisp targets.\n')
parser.add_argument('input', type=str, help='Input tuning file.')
parser.add_argument('-t', '--target', type=str, help='Target platform.',
choices=['pisp', 'vc4'], default='vc4')
parser.add_argument('output', type=str, nargs='?',
help='Output converted tuning file. If not provided, the input file will be updated in-place.',
default=None)
args = parser.parse_args()
with open(args.input, 'r') as f:
in_json = json.load(f)
out_json = convert_v2(in_json, args.target)
with open(args.output if args.output is not None else args.input, 'w') as f:
f.write(out_json)
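# Example invocation (hypothetical script and tuning file names):
#   ./convert_tuning.py -t pisp imx477_vc4.json imx477_pisp.json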


@@ -0,0 +1,802 @@
#!/usr/bin/env python3
#
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# camera tuning tool
import os
import sys
from ctt_image_load import *
from ctt_cac import *
from ctt_ccm import *
from ctt_awb import *
from ctt_alsc import *
from ctt_lux import *
from ctt_noise import *
from ctt_geq import *
from ctt_pretty_print_json import pretty_print
import random
import json
import re
"""
This file houses the camera object, which is used to perform the calibrations.
The camera object houses all the calibration images as attributes in three lists:
- imgs (macbeth charts)
- imgs_alsc (alsc correction images)
- imgs_cac (cac correction images)
Various calibrations are methods of the camera object, and the output is stored
in a dictionary called self.json.
Once all the calibration has been completed, the Camera.json is written into a
json file.
The camera object initialises its json dictionary by reading from a pre-written
blank json file. This has been done to avoid reproducing the entire json file
in the code here, thereby avoiding unnecessary clutter.
"""
"""
Get the colour and lux values from the strings of each individual image
"""
def get_col_lux(string):
"""
Extract colour and lux values from filename
"""
col = re.search(r'([0-9]+)[kK](\.(jpg|jpeg|brcm|dng)|_.*\.(jpg|jpeg|brcm|dng))$', string)
lux = re.search(r'([0-9]+)[lL](\.(jpg|jpeg|brcm|dng)|_.*\.(jpg|jpeg|brcm|dng))$', string)
try:
col = col.group(1)
except AttributeError:
"""
Catch error if images labelled incorrectly and pass reasonable defaults
"""
return None, None
try:
lux = lux.group(1)
except AttributeError:
"""
Catch error if images labelled incorrectly and pass reasonable defaults
Still returns colour if that has been found.
"""
return col, None
return int(col), int(lux)
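# e.g. for a (hypothetical) filename 'macbeth_2500k_1000l.jpg' this returns (2500, 1000)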
"""
Camera object that is the backbone of the tuning tool.
Input is the desired path of the output json.
"""
class Camera:
def __init__(self, jfile, json):
self.path = os.path.dirname(os.path.expanduser(__file__)) + '/'
if self.path == '/':
self.path = ''
self.imgs = []
self.imgs_alsc = []
self.imgs_cac = []
self.log = 'Log created : ' + time.asctime(time.localtime(time.time()))
self.log_separator = '\n'+'-'*70+'\n'
self.jf = jfile
"""
initial json dict populated by uncalibrated values
"""
self.json = json
"""
Perform colour correction calibrations by comparing macbeth patch colours
to standard macbeth chart colours.
"""
def ccm_cal(self, do_alsc_colour, grid_size):
if 'rpi.ccm' in self.disable:
return 1
print('\nStarting CCM calibration')
self.log_new_sec('CCM')
"""
if image is greyscale then CCM makes no sense
"""
if self.grey:
print('\nERROR: Can\'t do CCM on greyscale image!')
self.log += '\nERROR: Cannot perform CCM calibration '
self.log += 'on greyscale image!\nCCM aborted!'
del self.json['rpi.ccm']
return 0
a = time.time()
"""
Check if alsc tables have been generated, if not then do ccm without
alsc
"""
if ("rpi.alsc" not in self.disable) and do_alsc_colour:
"""
case where ALSC colour has been done, so no errors should be
expected...
"""
try:
cal_cr_list = self.json['rpi.alsc']['calibrations_Cr']
cal_cb_list = self.json['rpi.alsc']['calibrations_Cb']
self.log += '\nALSC tables found successfully'
except KeyError:
cal_cr_list, cal_cb_list = None, None
print('WARNING! No ALSC tables found for CCM!')
print('Performing CCM calibrations without ALSC correction...')
self.log += '\nWARNING: No ALSC tables found.\nCCM calibration '
self.log += 'performed without ALSC correction...'
else:
"""
case where config options result in CCM done without ALSC colour tables
"""
cal_cr_list, cal_cb_list = None, None
self.log += '\nWARNING: No ALSC tables found.\nCCM calibration '
self.log += 'performed without ALSC correction...'
"""
Do CCM calibration
"""
try:
ccms = ccm(self, cal_cr_list, cal_cb_list, grid_size)
except ArithmeticError:
print('ERROR: Matrix is singular!\nTake new pictures and try again...')
self.log += '\nERROR: Singular matrix encountered during fit!'
self.log += '\nCCM aborted!'
return 1
"""
Write output to json
"""
self.json['rpi.ccm']['ccms'] = ccms
self.log += '\nCCM calibration written to json file'
print('Finished CCM calibration')
"""
Perform chromatic aberration correction using multiple dots images.
"""
def cac_cal(self, do_alsc_colour):
if 'rpi.cac' in self.disable:
return 1
print('\nStarting CAC calibration')
self.log_new_sec('CAC')
"""
check if cac images have been taken
"""
if len(self.imgs_cac) == 0:
print('\nError:\nNo cac calibration images found')
self.log += '\nERROR: No CAC calibration images found!'
self.log += '\nCAC calibration aborted!'
return 1
"""
if image is greyscale then CAC makes no sense
"""
if self.grey:
print('\nERROR: Can\'t do CAC on greyscale image!')
self.log += '\nERROR: Cannot perform CAC calibration '
self.log += 'on greyscale image!\nCAC aborted!'
del self.json['rpi.cac']
return 0
a = time.time()
"""
Check if camera is greyscale or color. If not greyscale, then perform cac
"""
if do_alsc_colour:
"""
Here we have a color sensor. Perform cac
"""
try:
cacs = cac(self)
except ArithmeticError:
print('ERROR: Matrix is singular!\nTake new pictures and try again...')
self.log += '\nERROR: Singular matrix encountered during fit!'
self.log += '\nCAC aborted!'
return 1
else:
"""
case where config options suggest a greyscale camera. There is no point in
doing CAC, so skip the calibration instead of writing incomplete data.
"""
self.log += '\nWARNING: Greyscale camera, so no colour data available.'
self.log += '\nCAC calibration skipped!'
return 0
"""
Write output to json
"""
self.json['rpi.cac']['cac'] = cacs
self.log += '\nCAC calibration written to json file'
print('Finished CAC calibration')
"""
Auto white balance calibration produces a colour curve for
various colour temperatures, as well as providing a maximum 'wiggle room'
distance from this curve (transverse_neg/pos).
"""
def awb_cal(self, greyworld, do_alsc_colour, grid_size):
if 'rpi.awb' in self.disable:
return 1
print('\nStarting AWB calibration')
self.log_new_sec('AWB')
"""
if image is greyscale then AWB makes no sense
"""
if self.grey:
print('\nERROR: Can\'t do AWB on greyscale image!')
self.log += '\nERROR: Cannot perform AWB calibration '
self.log += 'on greyscale image!\nAWB aborted!'
del self.json['rpi.awb']
return 0
"""
optional set greyworld (e.g. for noir cameras)
"""
if greyworld:
self.json['rpi.awb']['bayes'] = 0
self.log += '\nGreyworld set'
"""
Check if alsc tables have been generated, if not then do awb without
alsc correction
"""
if ("rpi.alsc" not in self.disable) and do_alsc_colour:
try:
cal_cr_list = self.json['rpi.alsc']['calibrations_Cr']
cal_cb_list = self.json['rpi.alsc']['calibrations_Cb']
self.log += '\nALSC tables found successfully'
except KeyError:
cal_cr_list, cal_cb_list = None, None
print('ERROR, no ALSC calibrations found for AWB')
print('Performing AWB without ALSC tables')
self.log += '\nWARNING: No ALSC tables found.\nAWB calibration '
self.log += 'performed without ALSC correction...'
else:
cal_cr_list, cal_cb_list = None, None
self.log += '\nWARNING: No ALSC tables found.\nAWB calibration '
self.log += 'performed without ALSC correction...'
"""
call calibration function
"""
plot = "rpi.awb" in self.plot
awb_out = awb(self, cal_cr_list, cal_cb_list, plot, grid_size)
ct_curve, transverse_neg, transverse_pos = awb_out
"""
write output to json
"""
self.json['rpi.awb']['ct_curve'] = ct_curve
self.json['rpi.awb']['sensitivity_r'] = 1.0
self.json['rpi.awb']['sensitivity_b'] = 1.0
self.json['rpi.awb']['transverse_pos'] = transverse_pos
self.json['rpi.awb']['transverse_neg'] = transverse_neg
self.log += '\nAWB calibration written to json file'
print('Finished AWB calibration')
"""
Auto lens shading correction completely mitigates the effects of lens shading for each
colour channel separately, and then partially corrects for vignetting.
The extent of the correction depends on the 'luminance_strength' parameter.
"""
def alsc_cal(self, luminance_strength, do_alsc_colour, grid_size, max_gain=8.0):
if 'rpi.alsc' in self.disable:
return 1
print('\nStarting ALSC calibration')
self.log_new_sec('ALSC')
"""
check if alsc images have been taken
"""
if len(self.imgs_alsc) == 0:
print('\nError:\nNo alsc calibration images found')
self.log += '\nERROR: No ALSC calibration images found!'
self.log += '\nALSC calibration aborted!'
return 1
self.json['rpi.alsc']['luminance_strength'] = luminance_strength
if self.grey and do_alsc_colour:
print('Greyscale camera so only luminance_lut calculated')
do_alsc_colour = False
self.log += '\nWARNING: ALSC colour correction cannot be done on '
self.log += 'greyscale image!\nALSC colour corrections forced off!'
"""
call calibration function
"""
plot = "rpi.alsc" in self.plot
alsc_out = alsc_all(self, do_alsc_colour, plot, grid_size, max_gain=max_gain)
cal_cr_list, cal_cb_list, luminance_lut, av_corn = alsc_out
"""
write output to json and finish if not do_alsc_colour
"""
if not do_alsc_colour:
self.json['rpi.alsc']['luminance_lut'] = luminance_lut
self.json['rpi.alsc']['n_iter'] = 0
self.log += '\nALSC calibrations written to json file'
self.log += '\nNo colour calibrations performed'
print('Finished ALSC calibrations')
return 1
self.json['rpi.alsc']['calibrations_Cr'] = cal_cr_list
self.json['rpi.alsc']['calibrations_Cb'] = cal_cb_list
self.json['rpi.alsc']['luminance_lut'] = luminance_lut
self.log += '\nALSC colour and luminance tables written to json file'
"""
The sigmas determine the strength of the adaptive algorithm, that
cleans up any lens shading that has slipped through the alsc. These are
determined by measuring a 'worst-case' difference between two alsc tables
that are adjacent in colour space. If, however, only one colour
temperature has been provided, then this difference can not be computed
as only one table is available.
To determine the sigmas you would have to estimate the error of an alsc
table with only the image it was taken on as a check. To avoid circularity,
default exaggerated sigmas are used, which can result in too much alsc and
is therefore not advised.
In general, just take another alsc picture at another colour temperature!
"""
if len(self.imgs_alsc) == 1:
self.json['rpi.alsc']['sigma'] = 0.005
self.json['rpi.alsc']['sigma_Cb'] = 0.005
print('\nWarning:\nOnly one alsc calibration found'
'\nStandard sigmas used for adaptive algorithm.')
print('Finished ALSC calibrations')
self.log += '\nWARNING: Only one colour temperature found in '
self.log += 'calibration images.\nStandard sigmas used for adaptive '
self.log += 'algorithm!'
return 1
"""
obtain worst-case scenario residual sigmas
"""
sigma_r, sigma_b = get_sigma(self, cal_cr_list, cal_cb_list, grid_size)
"""
write output to json
"""
self.json['rpi.alsc']['sigma'] = np.round(sigma_r, 5)
self.json['rpi.alsc']['sigma_Cb'] = np.round(sigma_b, 5)
self.log += '\nCalibrated sigmas written to json file'
print('Finished ALSC calibrations')
"""
Green equalisation fixes problems caused by discrepancies in green
channels. This is done by measuring the effect on macbeth chart patches,
which ideally would have the same green values throughout.
An upper bound linear model is fit, fixing a threshold for the green
differences that are corrected.
"""
def geq_cal(self):
if 'rpi.geq' in self.disable:
return 1
print('\nStarting GEQ calibrations')
self.log_new_sec('GEQ')
"""
perform calibration
"""
plot = 'rpi.geq' in self.plot
slope, offset = geq_fit(self, plot)
"""
write output to json
"""
self.json['rpi.geq']['offset'] = offset
self.json['rpi.geq']['slope'] = slope
self.log += '\nGEQ calibrations written to json file'
print('Finished GEQ calibrations')
"""
Lux calibrations allow the lux level of a scene to be estimated by a ratio
calculation. Lux values are used in the pipeline for algorithms such as AGC
and AWB
"""
def lux_cal(self):
if 'rpi.lux' in self.disable:
return 1
print('\nStarting LUX calibrations')
self.log_new_sec('LUX')
"""
The lux calibration is done on a single image. For best effects, the
image with lux level closest to 1000 is chosen.
"""
luxes = [Img.lux for Img in self.imgs]
argmax = luxes.index(min(luxes, key=lambda l: abs(1000-l)))
Img = self.imgs[argmax]
self.log += '\nLux found closest to 1000: {} lx'.format(Img.lux)
self.log += '\nImage used: ' + Img.name
if Img.lux < 50:
self.log += '\nWARNING: Low lux could cause inaccurate calibrations!'
"""
do calibration
"""
lux_out, shutter_speed, gain = lux(self, Img)
"""
write output to json
"""
self.json['rpi.lux']['reference_shutter_speed'] = shutter_speed
self.json['rpi.lux']['reference_gain'] = gain
self.json['rpi.lux']['reference_lux'] = Img.lux
self.json['rpi.lux']['reference_Y'] = lux_out
self.log += '\nLUX calibrations written to json file'
print('Finished LUX calibrations')
"""
Noise calibration attempts to describe the noise profile of the sensor. The
calibration is run on macbeth images and the final output is taken as the average.
"""
def noise_cal(self):
if 'rpi.noise' in self.disable:
return 1
print('\nStarting NOISE calibrations')
self.log_new_sec('NOISE')
"""
run calibration on all images and sort by slope.
"""
plot = "rpi.noise" in self.plot
noise_out = sorted([noise(self, Img, plot) for Img in self.imgs], key=lambda x: x[0])
self.log += '\nFinished processing images'
"""
take the average of the interquartile
"""
length = len(noise_out)
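# (interquartile-style trim: e.g. with 8 images, entries 2..6 of the sorted list are averaged below)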
noise_out = np.mean(noise_out[length//4:1+3*length//4], axis=0)
self.log += '\nAverage noise profile: constant = {} '.format(int(noise_out[1]))
self.log += 'slope = {:.3f}'.format(noise_out[0])
"""
write to json
"""
self.json['rpi.noise']['reference_constant'] = int(noise_out[1])
self.json['rpi.noise']['reference_slope'] = round(noise_out[0], 3)
self.log += '\nNOISE calibrations written to json'
print('Finished NOISE calibrations')
"""
Removes json entries that are turned off
"""
def json_remove(self, disable):
self.log_new_sec('Disabling Options', cal=False)
if len(self.disable) == 0:
self.log += '\nNothing disabled!'
return 1
for key in disable:
try:
del self.json[key]
self.log += '\nDisabled: ' + key
except KeyError:
self.log += '\nERROR: ' + key + ' not found!'
"""
write the json dictionary to the output json file, pretty-printed
"""
def write_json(self, version=2.0, target='bcm2835', grid_size=(16, 12)):
"""
Write json dictionary to file using our version 2 format
"""
out_json = {
"version": version,
'target': target if target != 'vc4' else 'bcm2835',
"algorithms": [{name: data} for name, data in self.json.items()],
}
with open(self.jf, 'w') as f:
f.write(pretty_print(out_json,
custom_elems={'table': grid_size[0], 'luminance_lut': grid_size[0]}))
"""
add a new section to the log file
"""
def log_new_sec(self, section, cal=True):
self.log += '\n'+self.log_separator
self.log += section
if cal:
self.log += ' Calibration'
self.log += self.log_separator
"""
write script arguments to log file
"""
def log_user_input(self, json_output, directory, config, log_output):
self.log_new_sec('User Arguments', cal=False)
self.log += '\nJson file output: ' + json_output
self.log += '\nCalibration images directory: ' + directory
if config is None:
self.log += '\nNo configuration file input... using default options'
elif config is False:
self.log += '\nWARNING: Invalid configuration file path...'
self.log += ' using default options'
elif config is True:
self.log += '\nWARNING: Invalid syntax in configuration file...'
self.log += ' using default options'
else:
self.log += '\nConfiguration file: ' + config
if log_output is None:
self.log += '\nNo log file path input... using default: ctt_log.txt'
else:
self.log += '\nLog file output: ' + log_output
"""
write log file
"""
def write_log(self, filename):
if filename is None:
filename = 'ctt_log.txt'
self.log += '\n' + self.log_separator
with open(filename, 'w') as logfile:
logfile.write(self.log)
"""
Add all images from directory, pass them into the relevant list of images and
extract lux and temperature values.
"""
def add_imgs(self, directory, mac_config, blacklevel=-1):
self.log_new_sec('Image Loading', cal=False)
img_suc_msg = 'Image loaded successfully!'
print('\n\nLoading images from '+directory)
self.log += '\nDirectory: ' + directory
"""
get list of files
"""
filename_list = get_photos(directory)
print("Files found: {}".format(len(filename_list)))
self.log += '\nFiles found: {}'.format(len(filename_list))
"""
iterate over files
"""
filename_list.sort()
for filename in filename_list:
address = directory + filename
print('\nLoading image: '+filename)
self.log += '\n\nImage: ' + filename
"""
obtain colour and lux value
"""
col, lux = get_col_lux(filename)
"""
Check if image is an alsc calibration image
"""
if 'alsc' in filename:
Img = load_image(self, address, mac=False)
self.log += '\nIdentified as an ALSC image'
"""
check if image data has been successfully unpacked
"""
if Img == 0:
print('\nDISCARDED')
self.log += '\nImage discarded!'
continue
"""
check that image colour temperature has been successfully obtained
"""
elif col is not None:
"""
if successful, append to list and continue to next image
"""
Img.col = col
Img.name = filename
self.log += '\nColour temperature: {} K'.format(col)
self.imgs_alsc.append(Img)
if blacklevel != -1:
Img.blacklevel_16 = blacklevel
print(img_suc_msg)
continue
else:
print('Error! No colour temperature found!')
self.log += '\nWARNING: Error reading colour temperature'
self.log += '\nImage discarded!'
print('DISCARDED')
elif 'cac' in filename:
Img = load_image(self, address, mac=False)
self.log += '\nIdentified as a CAC image'
"""
check that the image data has been successfully unpacked
"""
if Img == 0:
print('\nDISCARDED')
self.log += '\nImage discarded!'
continue
Img.name = filename
self.log += '\nColour temperature: {} K'.format(col)
self.imgs_cac.append(Img)
if blacklevel != -1:
Img.blacklevel_16 = blacklevel
print(img_suc_msg)
continue
else:
self.log += '\nIdentified as macbeth chart image'
"""
if image isn't an alsc correction then it must have a lux and a
colour temperature value to be useful
"""
if lux is None:
print('DISCARDED')
self.log += '\nWARNING: Error reading lux value'
self.log += '\nImage discarded!'
continue
Img = load_image(self, address, mac_config)
"""
check that image data has been successfully unpacked
"""
if Img == 0:
print('DISCARDED')
self.log += '\nImage discarded!'
continue
else:
"""
if successful, append to list and continue to next image
"""
Img.col, Img.lux = col, lux
Img.name = filename
self.log += '\nColour temperature: {} K'.format(col)
self.log += '\nLux value: {} lx'.format(lux)
if blacklevel != -1:
Img.blacklevel_16 = blacklevel
print(img_suc_msg)
self.imgs.append(Img)
print('\nFinished loading images')
"""
Check that usable images have been found
Possible errors include:
- no macbeth chart
- incorrect filename/extension
- images from different cameras
"""
def check_imgs(self, macbeth=True):
self.log += '\n\nImages found:'
self.log += '\nMacbeth : {}'.format(len(self.imgs))
self.log += '\nALSC : {} '.format(len(self.imgs_alsc))
self.log += '\nCAC: {} '.format(len(self.imgs_cac))
self.log += '\n\nCamera metadata'
"""
check usable images found
"""
if len(self.imgs) == 0 and macbeth:
print('\nERROR: No usable macbeth chart images found')
self.log += '\nERROR: No usable macbeth chart images found'
return 0
elif len(self.imgs) == 0 and len(self.imgs_alsc) == 0 and len(self.imgs_cac) == 0:
print('\nERROR: No usable images found')
self.log += '\nERROR: No usable images found'
return 0
"""
Double check that every image has come from the same camera...
"""
all_imgs = self.imgs + self.imgs_alsc + self.imgs_cac
camNames = list(set([Img.camName for Img in all_imgs]))
patterns = list(set([Img.pattern for Img in all_imgs]))
sigbitss = list(set([Img.sigbits for Img in all_imgs]))
blacklevels = list(set([Img.blacklevel_16 for Img in all_imgs]))
sizes = list(set([(Img.w, Img.h) for Img in all_imgs]))
if len(camNames) == 1 and len(patterns) == 1 and len(sigbitss) == 1 and len(blacklevels) == 1 and len(sizes) == 1:
self.grey = (patterns[0] == 128)
self.blacklevel_16 = blacklevels[0]
self.log += '\nName: {}'.format(camNames[0])
self.log += '\nBayer pattern case: {}'.format(patterns[0])
if self.grey:
self.log += '\nGreyscale camera identified'
self.log += '\nSignificant bits: {}'.format(sigbitss[0])
self.log += '\nBlacklevel: {}'.format(blacklevels[0])
self.log += '\nImage size: w = {} h = {}'.format(sizes[0][0], sizes[0][1])
return 1
else:
print('\nERROR: Images from different cameras')
self.log += '\nERROR: Images are from different cameras'
return 0
def run_ctt(json_output, directory, config, log_output, json_template, grid_size, target, alsc_only=False):
"""
check input files are jsons
"""
if json_output[-5:] != '.json':
raise ArgError('\n\nError: Output must be a json file!')
if config is not None:
"""
check if config file is actually a json
"""
if config[-5:] != '.json':
raise ArgError('\n\nError: Config file must be a json file!')
"""
read configurations
"""
try:
with open(config, 'r') as config_json:
configs = json.load(config_json)
except FileNotFoundError:
configs = {}
config = False
except json.decoder.JSONDecodeError:
configs = {}
config = True
else:
configs = {}
"""
load configurations from config file, if not given then set default
"""
disable = get_config(configs, "disable", [], 'list')
plot = get_config(configs, "plot", [], 'list')
awb_d = get_config(configs, "awb", {}, 'dict')
greyworld = get_config(awb_d, "greyworld", 0, 'bool')
alsc_d = get_config(configs, "alsc", {}, 'dict')
do_alsc_colour = get_config(alsc_d, "do_alsc_colour", 1, 'bool')
luminance_strength = get_config(alsc_d, "luminance_strength", 0.8, 'num')
lsc_max_gain = get_config(alsc_d, "max_gain", 8.0, 'num')
blacklevel = get_config(configs, "blacklevel", -1, 'num')
macbeth_d = get_config(configs, "macbeth", {}, 'dict')
mac_small = get_config(macbeth_d, "small", 0, 'bool')
mac_show = get_config(macbeth_d, "show", 0, 'bool')
mac_config = (mac_small, mac_show)
print("Read lsc_max_gain", lsc_max_gain)
if blacklevel < -1 or blacklevel >= 2**16:
print('\nInvalid blacklevel, defaulted to -1 (use the black level reported by each image)')
blacklevel = -1
if luminance_strength < 0 or luminance_strength > 1:
print('\nInvalid luminance_strength, defaulted to 0.5')
luminance_strength = 0.5
"""
sanitise directory path
"""
if directory[-1] != '/':
directory += '/'
"""
initialise tuning tool and load images
"""
try:
Cam = Camera(json_output, json=json_template)
Cam.log_user_input(json_output, directory, config, log_output)
if alsc_only:
disable = set(Cam.json.keys()).symmetric_difference({"rpi.alsc"})
Cam.disable = disable
Cam.plot = plot
Cam.add_imgs(directory, mac_config, blacklevel)
except FileNotFoundError:
raise ArgError('\n\nError: Input image directory not found!')
"""
perform calibrations as long as check_imgs returns True
If alsc is activated then it must be done before awb and ccm since the alsc
tables are used in awb and ccm calibrations
ccm also technically does an awb but it measures this from the macbeth
chart in the image rather than using calibration data
"""
if Cam.check_imgs(macbeth=not alsc_only):
if not alsc_only:
Cam.json['rpi.black_level']['black_level'] = Cam.blacklevel_16
Cam.json_remove(disable)
print('\nSTARTING CALIBRATIONS')
Cam.alsc_cal(luminance_strength, do_alsc_colour, grid_size, max_gain=lsc_max_gain)
Cam.geq_cal()
Cam.lux_cal()
Cam.noise_cal()
if "rpi.cac" in json_template:
Cam.cac_cal(do_alsc_colour)
Cam.awb_cal(greyworld, do_alsc_colour, grid_size)
Cam.ccm_cal(do_alsc_colour, grid_size)
print('\nFINISHED CALIBRATIONS')
Cam.write_json(target=target, grid_size=grid_size)
Cam.write_log(log_output)
print('\nCalibrations written to: '+json_output)
if log_output is None:
log_output = 'ctt_log.txt'
print('Log file written to: '+log_output)
else:
Cam.write_log(log_output)
if __name__ == '__main__':
"""
initialise calibration
"""
if len(sys.argv) == 1:
print("""
PiSP Tuning Tool version 1.0
Required Arguments:
'-i' : Calibration image directory.
'-o' : Name of output json file.
Optional Arguments:
'-t' : Target platform - 'pisp' or 'vc4'. Default 'vc4'
'-c' : Config file for the CTT. If not passed, default parameters used.
'-l' : Name of output log file. If not passed, 'ctt_log.txt' used.
""")
quit(0)
else:
"""
parse input arguments
"""
json_output, directory, config, log_output, target = parse_input()
if target == 'pisp':
from ctt_pisp import json_template, grid_size
elif target == 'vc4':
from ctt_vc4 import json_template, grid_size
run_ctt(json_output, directory, config, log_output, json_template, grid_size, target)


@@ -0,0 +1,308 @@
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# camera tuning tool for ALSC (auto lens shading correction)
from ctt_image_load import *
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
"""
perform alsc calibration on a set of images
"""
def alsc_all(Cam, do_alsc_colour, plot, grid_size=(16, 12), max_gain=8.0):
imgs_alsc = Cam.imgs_alsc
grid_w, grid_h = grid_size
"""
create list of colour temperatures and associated calibration tables
"""
list_col = []
list_cr = []
list_cb = []
list_cg = []
for Img in imgs_alsc:
col, cr, cb, cg, size = alsc(Cam, Img, do_alsc_colour, plot, grid_size=grid_size, max_gain=max_gain)
list_col.append(col)
list_cr.append(cr)
list_cb.append(cb)
list_cg.append(cg)
Cam.log += '\n'
Cam.log += '\nFinished processing images'
w, h, dx, dy = size
Cam.log += '\nChannel dimensions: w = {} h = {}'.format(int(w), int(h))
Cam.log += '\n{}x{} grid rectangle size: w = {} h = {}'.format(grid_w, grid_h, dx, dy)
"""
convert to numpy array for data manipulation
"""
list_col = np.array(list_col)
list_cr = np.array(list_cr)
list_cb = np.array(list_cb)
list_cg = np.array(list_cg)
cal_cr_list = []
cal_cb_list = []
"""
only do colour calculations if required
"""
if do_alsc_colour:
Cam.log += '\nALSC colour tables'
for ct in sorted(set(list_col)):
Cam.log += '\nColour temperature: {} K'.format(ct)
"""
average tables for the same colour temperature
"""
indices = np.where(list_col == ct)
ct = int(ct)
t_r = np.mean(list_cr[indices], axis=0)
t_b = np.mean(list_cb[indices], axis=0)
"""
force numbers to be stored to 3dp.... :(
"""
t_r = np.where((100*t_r) % 1 <= 0.05, t_r+0.001, t_r)
t_b = np.where((100*t_b) % 1 <= 0.05, t_b+0.001, t_b)
t_r = np.where((100*t_r) % 1 >= 0.95, t_r-0.001, t_r)
t_b = np.where((100*t_b) % 1 >= 0.95, t_b-0.001, t_b)
t_r = np.round(t_r, 3)
t_b = np.round(t_b, 3)
r_corners = (t_r[0], t_r[grid_w - 1], t_r[-1], t_r[-grid_w])
b_corners = (t_b[0], t_b[grid_w - 1], t_b[-1], t_b[-grid_w])
middle_pos = (grid_h // 2 - 1) * grid_w + grid_w // 2 - 1
r_cen = t_r[middle_pos]+t_r[middle_pos + 1]+t_r[middle_pos + grid_w]+t_r[middle_pos + grid_w + 1]
r_cen = round(r_cen/4, 3)
b_cen = t_b[middle_pos]+t_b[middle_pos + 1]+t_b[middle_pos + grid_w]+t_b[middle_pos + grid_w + 1]
b_cen = round(b_cen/4, 3)
Cam.log += '\nRed table corners: {}'.format(r_corners)
Cam.log += '\nRed table centre: {}'.format(r_cen)
Cam.log += '\nBlue table corners: {}'.format(b_corners)
Cam.log += '\nBlue table centre: {}'.format(b_cen)
cr_dict = {
'ct': ct,
'table': list(t_r)
}
cb_dict = {
'ct': ct,
'table': list(t_b)
}
cal_cr_list.append(cr_dict)
cal_cb_list.append(cb_dict)
Cam.log += '\n'
else:
cal_cr_list, cal_cb_list = None, None
"""
average all values for luminance shading and return one table for all temperatures
"""
lum_lut = np.mean(list_cg, axis=0)
lum_lut = np.where((100*lum_lut) % 1 <= 0.05, lum_lut+0.001, lum_lut)
lum_lut = np.where((100*lum_lut) % 1 >= 0.95, lum_lut-0.001, lum_lut)
lum_lut = list(np.round(lum_lut, 3))
"""
calculate average corner for lsc gain calculation further on
"""
corners = (lum_lut[0], lum_lut[grid_w - 1], lum_lut[-1], lum_lut[-grid_w])
Cam.log += '\nLuminance table corners: {}'.format(corners)
middle_pos = (grid_h // 2 - 1) * grid_w + grid_w // 2 - 1
l_cen = lum_lut[middle_pos]+lum_lut[middle_pos + 1]+lum_lut[middle_pos + grid_w]+lum_lut[middle_pos + grid_w + 1]
l_cen = round(l_cen/4, 3)
Cam.log += '\nLuminance table centre: {}'.format(l_cen)
av_corn = np.sum(corners)/4
return cal_cr_list, cal_cb_list, lum_lut, av_corn
"""
calculate g/r and g/b for grid_w x grid_h points arranged in a grid for a single image
"""
def alsc(Cam, Img, do_alsc_colour, plot=False, grid_size=(16, 12), max_gain=8.0):
Cam.log += '\nProcessing image: ' + Img.name
grid_w, grid_h = grid_size
"""
get channel in correct order
"""
channels = [Img.channels[i] for i in Img.order]
"""
calculate size of single rectangle.
-(-(w-1)//grid_w) is a ceiling division. w-1 is to deal robustly with the case
where w is a multiple of the grid dimension.
"""
w, h = Img.w/2, Img.h/2
dx, dy = int(-(-(w-1)//grid_w)), int(-(-(h-1)//grid_h))
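# e.g. (hypothetical numbers) w = 2028 and grid_w = 16 gives dx = -(-2027//16) = 127,
# and 127*16 = 2032 >= 2028, so every column is covered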
"""
average the green channels into one
"""
av_ch_g = np.mean((channels[1:3]), axis=0)
if do_alsc_colour:
"""
obtain grid_w x grid_h grid of intensities for each channel and subtract black level
"""
g = get_grid(av_ch_g, dx, dy, grid_size) - Img.blacklevel_16
r = get_grid(channels[0], dx, dy, grid_size) - Img.blacklevel_16
b = get_grid(channels[3], dx, dy, grid_size) - Img.blacklevel_16
"""
calculate ratios as 32 bit in order to be supported by medianBlur function
"""
cr = np.reshape(g/r, (grid_h, grid_w)).astype('float32')
cb = np.reshape(g/b, (grid_h, grid_w)).astype('float32')
cg = np.reshape(1/g, (grid_h, grid_w)).astype('float32')
"""
median blur to remove peaks and save as float 64
"""
cr = cv2.medianBlur(cr, 3).astype('float64')
cr = cr/np.min(cr) # gain tables are easier for humans to read if the minimum is 1.0
cb = cv2.medianBlur(cb, 3).astype('float64')
cb = cb/np.min(cb)
cg = cv2.medianBlur(cg, 3).astype('float64')
cg = cg/np.min(cg)
cg = np.minimum(cg, max_gain)  # never exceed the max luminance gain (keeps the 2D shape for plotting)
"""
debugging code showing 2D surface plot of vignetting. Quite useful for
for sanity check
"""
if plot:
hf = plt.figure(figsize=(8, 8))
ha = hf.add_subplot(311, projection='3d')
"""
note Y is plotted as -Y so plot has same axes as image
"""
X, Y = np.meshgrid(range(grid_w), range(grid_h))
ha.plot_surface(X, -Y, cr, cmap=cm.coolwarm, linewidth=0)
ha.set_title('ALSC Plot\nImg: {}\n\ncr'.format(Img.str))
hb = hf.add_subplot(312, projection='3d')
hb.plot_surface(X, -Y, cb, cmap=cm.coolwarm, linewidth=0)
hb.set_title('cb')
hc = hf.add_subplot(313, projection='3d')
hc.plot_surface(X, -Y, cg, cmap=cm.coolwarm, linewidth=0)
hc.set_title('g')
# print(Img.str)
plt.show()
return Img.col, cr.flatten(), cb.flatten(), cg.flatten(), (w, h, dx, dy)
else:
"""
only perform calculations for luminance shading
"""
g = get_grid(av_ch_g, dx, dy, grid_size) - Img.blacklevel_16
cg = np.reshape(1/g, (grid_h, grid_w)).astype('float32')
cg = cv2.medianBlur(cg, 3).astype('float64')
cg = cg/np.min(cg)
cg = np.minimum(cg, max_gain)  # never exceed the max luminance gain (keeps the 2D shape for plotting)
if plot:
hf = plt.figure(figsize=(8, 8))
ha = hf.add_subplot(1, 1, 1, projection='3d')
X, Y = np.meshgrid(range(grid_w), range(grid_h))
ha.plot_surface(X, -Y, cg, cmap=cm.coolwarm, linewidth=0)
ha.set_title('ALSC Plot (Luminance only!)\nImg: {}\n\ncg'.format(Img.str))
plt.show()
return Img.col, None, None, cg.flatten(), (w, h, dx, dy)
"""
Compresses channel down to a grid of the requested size
"""
def get_grid(chan, dx, dy, grid_size):
grid_w, grid_h = grid_size
grid = []
"""
since the right and bottom borders will not necessarily have rectangles of
dimension dx x dy, the last row and column have to be handled separately.
"""
for i in range(grid_h - 1):
for j in range(grid_w - 1):
grid.append(np.mean(chan[dy*i:dy*(1+i), dx*j:dx*(1+j)]))
grid.append(np.mean(chan[dy*i:dy*(1+i), (grid_w - 1)*dx:]))
for j in range(grid_w - 1):
grid.append(np.mean(chan[(grid_h - 1)*dy:, dx*j:dx*(1+j)]))
grid.append(np.mean(chan[(grid_h - 1)*dy:, (grid_w - 1)*dx:]))
"""
return as np.array, ready for further manipulation
"""
return np.array(grid)
"""
obtains sigmas for red and blue, effectively a measure of the 'error'
"""
def get_sigma(Cam, cal_cr_list, cal_cb_list, grid_size):
Cam.log += '\nCalculating sigmas'
"""
provided the colour alsc tables were generated for two different colour
temperatures, sigma is calculated by comparing two calibration temperatures
adjacent in colour space
"""
"""
create list of colour temperatures
"""
cts = [cal['ct'] for cal in cal_cr_list]
# print(cts)
"""
calculate sigmas for each adjacent cts and return worst one
"""
sigma_rs = []
sigma_bs = []
for i in range(len(cts)-1):
sigma_rs.append(calc_sigma(cal_cr_list[i]['table'], cal_cr_list[i+1]['table'], grid_size))
sigma_bs.append(calc_sigma(cal_cb_list[i]['table'], cal_cb_list[i+1]['table'], grid_size))
Cam.log += '\nColour temperature interval {} - {} K'.format(cts[i], cts[i+1])
Cam.log += '\nSigma red: {}'.format(sigma_rs[-1])
Cam.log += '\nSigma blue: {}'.format(sigma_bs[-1])
"""
return maximum sigmas, not necessarily from the same colour temperature
interval
"""
sigma_r = max(sigma_rs) if sigma_rs else 0.005
sigma_b = max(sigma_bs) if sigma_bs else 0.005
Cam.log += '\nMaximum sigmas: Red = {} Blue = {}'.format(sigma_r, sigma_b)
# print(sigma_rs, sigma_bs)
# print(sigma_r, sigma_b)
return sigma_r, sigma_b
"""
calculate sigma from two adjacent gain tables
"""
def calc_sigma(g1, g2, grid_size):
grid_w, grid_h = grid_size
"""
reshape into grid_h x grid_w matrices
"""
g1 = np.reshape(g1, (grid_h, grid_w))
g2 = np.reshape(g2, (grid_h, grid_w))
"""
take the ratio of the two gain tables
"""
gg = g1/g2
if np.mean(gg) < 1:
gg = 1/gg
"""
for each internal patch, compute average difference between it and its 4
neighbours, then append to list
"""
diffs = []
for i in range(grid_h - 2):
for j in range(grid_w - 2):
"""
note indexing is incremented by 1 since all patches on borders are
not counted
"""
diff = np.abs(gg[i+1][j+1]-gg[i][j+1])
diff += np.abs(gg[i+1][j+1]-gg[i+2][j+1])
diff += np.abs(gg[i+1][j+1]-gg[i+1][j])
diff += np.abs(gg[i+1][j+1]-gg[i+1][j+2])
diffs.append(diff/4)
"""
return mean difference
"""
mean_diff = np.mean(diffs)
return(np.round(mean_diff, 5))
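# Sanity check (hypothetical flat tables): two identical tables give zero sigma, e.g.
# calc_sigma([1.0]*192, [1.0]*192, (16, 12)) -> 0.0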


@@ -0,0 +1,377 @@
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# camera tuning tool for AWB
from ctt_image_load import *
import matplotlib.pyplot as plt
from bisect import bisect_left
from scipy.optimize import fmin
"""
obtain piecewise linear approximation for colour curve
"""
def awb(Cam, cal_cr_list, cal_cb_list, plot, grid_size):
imgs = Cam.imgs
"""
condense alsc calibration tables into one dictionary
"""
if cal_cr_list is None:
colour_cals = None
else:
colour_cals = {}
for cr, cb in zip(cal_cr_list, cal_cb_list):
cr_tab = cr['table']
cb_tab = cb['table']
"""
normalise tables so min value is 1
"""
cr_tab = cr_tab/np.min(cr_tab)
cb_tab = cb_tab/np.min(cb_tab)
colour_cals[cr['ct']] = [cr_tab, cb_tab]
"""
obtain data from greyscale macbeth patches
"""
rb_raw = []
rbs_hat = []
for Img in imgs:
Cam.log += '\nProcessing '+Img.name
"""
get greyscale patches with alsc applied if alsc enabled.
Note: if alsc is disabled then colour_cals will be set to None and the
function will just return the greyscale patches
"""
r_patchs, b_patchs, g_patchs = get_alsc_patches(Img, colour_cals, grid_size=grid_size)
"""
calculate ratio of r, b to g
"""
r_g = np.mean(r_patchs/g_patchs)
b_g = np.mean(b_patchs/g_patchs)
Cam.log += '\n r : {:.4f} b : {:.4f}'.format(r_g, b_g)
"""
The curve tends to be better behaved in so-called hatspace.
R, B, G represent the individual channels. The colour curve is plotted in
r, b space, where:
r = R/G
b = B/G
This will be referred to as dehatspace... (sorry)
Hatspace is defined as:
r_hat = R/(R+B+G)
b_hat = B/(R+B+G)
To convert from dehatspace to hatspace (hat operation):
r_hat = r/(1+r+b)
b_hat = b/(1+r+b)
To convert from hatspace to dehatspace (dehat operation):
r = r_hat/(1-r_hat-b_hat)
b = b_hat/(1-r_hat-b_hat)
Proof is left as an exercise to the reader...
Throughout the code, r and b are sometimes referred to as r_g and b_g
as a reminder that they are ratios
"""
r_g_hat = r_g/(1+r_g+b_g)
b_g_hat = b_g/(1+r_g+b_g)
Cam.log += '\n r_hat : {:.4f} b_hat : {:.4f}'.format(r_g_hat, b_g_hat)
rbs_hat.append((r_g_hat, b_g_hat, Img.col))
rb_raw.append((r_g, b_g))
Cam.log += '\n'
Cam.log += '\nFinished processing images'
"""
sort all lists simultaneously by r_hat
"""
rbs_zip = list(zip(rbs_hat, rb_raw))
rbs_zip.sort(key=lambda x: x[0][0])
rbs_hat, rb_raw = list(zip(*rbs_zip))
"""
unzip tuples ready for processing
"""
rbs_hat = list(zip(*rbs_hat))
rb_raw = list(zip(*rb_raw))
"""
fit a quadratic curve to r_g_hat and b_g_hat
"""
a, b, c = np.polyfit(rbs_hat[0], rbs_hat[1], 2)
Cam.log += '\nFit quadratic curve in hatspace'
"""
the algorithm now approximates the shortest distance from each point to the
curve in dehatspace. Since the fit is done in hatspace, it is easier to
find the actual shortest distance in hatspace and use the projection back
into dehatspace as an overestimate.
The distance will be used for two things:
1) In the case that colour temperature does not strictly decrease with
increasing r/g, the closest point to the line will be chosen out of an
increasing pair of colours.
2) To calculate transverse negative and positive, the maximum positive
and negative distance from the line are chosen. This benefits from the
overestimate as the transverse pos/neg are upper bound values.
"""
"""
define fit function
"""
def f(x):
return a*x**2 + b*x + c
"""
iterate over points (R, B are x and y coordinates of points) and calculate
distance to line in dehatspace
"""
dists = []
for i, (R, B) in enumerate(zip(rbs_hat[0], rbs_hat[1])):
"""
define function to minimise as square distance between datapoint and
point on curve. Squaring is monotonic so minimising radius squared is
equivalent to minimising radius
"""
def f_min(x):
y = f(x)
return((x-R)**2+(y-B)**2)
"""
perform optimisation with scipy.optimize.fmin
"""
x_hat = fmin(f_min, R, disp=0)[0]
y_hat = f(x_hat)
"""
dehat
"""
x = x_hat/(1-x_hat-y_hat)
y = y_hat/(1-x_hat-y_hat)
rr = R/(1-R-B)
bb = B/(1-R-B)
"""
calculate euclidean distance in dehatspace
"""
dist = ((x-rr)**2+(y-bb)**2)**0.5
"""
return negative if point is below the fit curve
"""
if (x+y) > (rr+bb):
dist *= -1
dists.append(dist)
Cam.log += '\nFound closest point on fit line to each point in dehatspace'
"""
calculate wiggle factors in awb. 10% added since this is an upper bound
"""
transverse_neg = - np.min(dists) * 1.1
transverse_pos = np.max(dists) * 1.1
Cam.log += '\nTransverse pos : {:.5f}'.format(transverse_pos)
Cam.log += '\nTransverse neg : {:.5f}'.format(transverse_neg)
"""
set minimum transverse wiggles to 0.01.
Wiggle factors dictate how far off of the curve the algorithm searches. 0.01
is a suitable minimum that gives better results for lighting conditions not
within calibration dataset. Anything less will generalise poorly.
"""
if transverse_pos < 0.01:
transverse_pos = 0.01
Cam.log += '\nForced transverse pos to 0.01'
if transverse_neg < 0.01:
transverse_neg = 0.01
Cam.log += '\nForced transverse neg to 0.01'
"""
generate new b_hat values at each r_hat according to fit
"""
r_hat_fit = np.array(rbs_hat[0])
b_hat_fit = a*r_hat_fit**2 + b*r_hat_fit + c
"""
transform from hatspace to dehatspace
"""
r_fit = r_hat_fit/(1-r_hat_fit-b_hat_fit)
b_fit = b_hat_fit/(1-r_hat_fit-b_hat_fit)
c_fit = np.round(rbs_hat[2], 0)
"""
round to 4dp
"""
r_fit = np.where((1000*r_fit) % 1 <= 0.05, r_fit+0.0001, r_fit)
r_fit = np.where((1000*r_fit) % 1 >= 0.95, r_fit-0.0001, r_fit)
b_fit = np.where((1000*b_fit) % 1 <= 0.05, b_fit+0.0001, b_fit)
b_fit = np.where((1000*b_fit) % 1 >= 0.95, b_fit-0.0001, b_fit)
r_fit = np.round(r_fit, 4)
b_fit = np.round(b_fit, 4)
"""
The following code ensures that colour temperature decreases with
increasing r/g
"""
"""
iterate backwards over list for easier indexing
"""
i = len(c_fit) - 1
while i > 0:
if c_fit[i] > c_fit[i-1]:
Cam.log += '\nColour temperature increase found\n'
Cam.log += '{} K at r = {} to '.format(c_fit[i-1], r_fit[i-1])
Cam.log += '{} K at r = {}'.format(c_fit[i], r_fit[i])
"""
if colour temperature increases then discard point furthest from
the transformed fit (dehatspace)
"""
error_1 = abs(dists[i-1])
error_2 = abs(dists[i])
Cam.log += '\nDistances from fit:\n'
Cam.log += '{} K : {:.5f} , '.format(c_fit[i], error_1)
Cam.log += '{} K : {:.5f}'.format(c_fit[i-1], error_2)
"""
find bad index
note that in python false = 0 and true = 1
"""
bad = i - (error_1 < error_2)
Cam.log += '\nPoint at {} K deleted as '.format(c_fit[bad])
Cam.log += 'it is furthest from fit'
"""
delete bad point
"""
r_fit = np.delete(r_fit, bad)
b_fit = np.delete(b_fit, bad)
c_fit = np.delete(c_fit, bad).astype(np.uint16)
"""
note that if a point has been discarded then the length has decreased
by one, meaning that decreasing the index by one will reassess the kept
point against the next point. It is therefore possible, in theory, for
two adjacent points to be discarded, although probably rare
"""
i -= 1
"""
return formatted ct curve, ordered by increasing colour temperature
"""
ct_curve = list(np.array(list(zip(b_fit, r_fit, c_fit))).flatten())[::-1]
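# (flattening the (b, r, ct) triples and reversing the flat list yields
# [ct, r, b, ct, r, b, ...] with ct increasing)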
Cam.log += '\nFinal CT curve:'
for i in range(len(ct_curve)//3):
j = 3*i
Cam.log += '\n ct: {} '.format(ct_curve[j])
Cam.log += ' r: {} '.format(ct_curve[j+1])
Cam.log += ' b: {} '.format(ct_curve[j+2])
"""
plotting code for debug
"""
if plot:
x = np.linspace(np.min(rbs_hat[0]), np.max(rbs_hat[0]), 100)
y = a*x**2 + b*x + c
plt.subplot(2, 1, 1)
plt.title('hatspace')
plt.plot(rbs_hat[0], rbs_hat[1], ls='--', color='blue')
plt.plot(x, y, color='green', ls='-')
plt.scatter(rbs_hat[0], rbs_hat[1], color='red')
for i, ct in enumerate(rbs_hat[2]):
plt.annotate(str(ct), (rbs_hat[0][i], rbs_hat[1][i]))
plt.xlabel('$\\hat{r}$')
plt.ylabel('$\\hat{b}$')
"""
optionally set axes equal to the shortest distance so the line really does
look perpendicular and everybody is happy
"""
# ax = plt.gca()
# ax.set_aspect('equal')
plt.grid()
plt.subplot(2, 1, 2)
plt.title('dehatspace - indoors?')
plt.plot(r_fit, b_fit, color='blue')
plt.scatter(rb_raw[0], rb_raw[1], color='green')
plt.scatter(r_fit, b_fit, color='red')
for i, ct in enumerate(c_fit):
plt.annotate(str(ct), (r_fit[i], b_fit[i]))
plt.xlabel('$r$')
plt.ylabel('$b$')
"""
optionally set axes equal to the shortest distance so the line really does
look perpendicular and everybody is happy
"""
# ax = plt.gca()
# ax.set_aspect('equal')
plt.subplots_adjust(hspace=0.5)
plt.grid()
plt.show()
"""
end of plotting code
"""
return(ct_curve, np.round(transverse_pos, 5), np.round(transverse_neg, 5))
"""
obtain greyscale patches and perform alsc colour correction
"""
def get_alsc_patches(Img, colour_cals, grey=True, grid_size=(16, 12)):
"""
get patch centre coordinates, image colour and the actual
patches for each channel, remembering to subtract blacklevel
If grey then only greyscale patches considered
"""
grid_w, grid_h = grid_size
if grey:
cen_coords = Img.cen_coords[3::4]
col = Img.col
patches = [np.array(Img.patches[i]) for i in Img.order]
r_patchs = patches[0][3::4] - Img.blacklevel_16
b_patchs = patches[3][3::4] - Img.blacklevel_16
"""
note two green channels are averages
"""
g_patchs = (patches[1][3::4]+patches[2][3::4])/2 - Img.blacklevel_16
else:
cen_coords = Img.cen_coords
col = Img.col
patches = [np.array(Img.patches[i]) for i in Img.order]
r_patchs = patches[0] - Img.blacklevel_16
b_patchs = patches[3] - Img.blacklevel_16
g_patchs = (patches[1]+patches[2])/2 - Img.blacklevel_16
if colour_cals is None:
return r_patchs, b_patchs, g_patchs
"""
find where image colour fits in alsc colour calibration tables
"""
cts = list(colour_cals.keys())
pos = bisect_left(cts, col)
"""
if img colour is below minimum or above maximum alsc calibration colour, simply
pick extreme closest to img colour
"""
if pos % len(cts) == 0:
"""
this works because -0 = 0 = first and -1 = last index
"""
col_tabs = np.array(colour_cals[cts[-pos//len(cts)]])
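# (pos == 0 means col sits below every calibrated ct, giving cts[0];
# pos == len(cts) means it sits above them all, giving cts[-1])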
"""
else, perform linear interpolation between existing alsc colour
calibration tables
"""
else:
bef = cts[pos-1]
aft = cts[pos]
da = col-bef
db = aft-col
bef_tabs = np.array(colour_cals[bef])
aft_tabs = np.array(colour_cals[aft])
col_tabs = (bef_tabs*db + aft_tabs*da)/(da+db)
col_tabs = np.reshape(col_tabs, (2, grid_h, grid_w))
"""
calculate dx, dy used to calculate alsc table
"""
w, h = Img.w/2, Img.h/2
dx, dy = int(-(-(w-1)//grid_w)), int(-(-(h-1)//grid_h))
"""
make list of pairs of gains for each patch by selecting the correct value
in alsc colour calibration table
"""
patch_gains = []
for cen in cen_coords:
x, y = cen[0]//dx, cen[1]//dy
# We could probably do with some better spatial interpolation here?
col_gains = (col_tabs[0][y][x], col_tabs[1][y][x])
patch_gains.append(col_gains)
"""
multiply the r and b channels in each patch by the respective gain, finally
performing the alsc colour correction
"""
for i, gains in enumerate(patch_gains):
r_patchs[i] = r_patchs[i] * gains[0]
b_patchs[i] = b_patchs[i] * gains[1]
"""
return greyscale patches, g channel and correct r, b channels
"""
return r_patchs, b_patchs, g_patchs


@@ -0,0 +1,228 @@
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2023, Raspberry Pi Ltd
#
# ctt_cac.py - CAC (Chromatic Aberration Correction) tuning tool
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from ctt_dots_locator import find_dots_locations
# This is the wrapper file that creates a JSON entry for you to append
# to your camera tuning file.
# It calculates the chromatic aberration at different points throughout
# the image and uses that to produce a matrix that can then be used
# in the camera tuning files to correct this aberration.
def pprint_array(array):
# Function to print the array in a tidier format
output = ""
for i in range(len(array)):
for j in range(len(array[0])):
output += str(round(array[i, j], 2)) + ", "
# Add the necessary indentation to the array
output += "\n "
# Cut off the end of the array (nicely formats it)
return output[:-22]
def plot_shifts(red_shifts, blue_shifts):
# If users want, they can pass a command line option to show the shifts on a graph
# Can be useful to check that the functions are all working, and that the sample
# images are doing the right thing
Xs = np.array(red_shifts)[:, 0]
Ys = np.array(red_shifts)[:, 1]
Zs = np.array(red_shifts)[:, 2]
Zs2 = np.array(red_shifts)[:, 3]
Zs3 = np.array(blue_shifts)[:, 2]
Zs4 = np.array(blue_shifts)[:, 3]
fig, axs = plt.subplots(2, 2)
ax = fig.add_subplot(2, 2, 1, projection='3d')
ax.scatter(Xs, Ys, Zs, cmap=cm.jet, linewidth=0)
ax.set_title('Red X Shift')
ax = fig.add_subplot(2, 2, 2, projection='3d')
ax.scatter(Xs, Ys, Zs2, cmap=cm.jet, linewidth=0)
ax.set_title('Red Y Shift')
ax = fig.add_subplot(2, 2, 3, projection='3d')
ax.scatter(Xs, Ys, Zs3, cmap=cm.jet, linewidth=0)
ax.set_title('Blue X Shift')
ax = fig.add_subplot(2, 2, 4, projection='3d')
ax.scatter(Xs, Ys, Zs4, cmap=cm.jet, linewidth=0)
ax.set_title('Blue Y Shift')
fig.tight_layout()
plt.show()
def shifts_to_yaml(red_shift, blue_shift, image_dimensions, output_grid_size=9):
# Convert the shifts to a numpy array for easier handling and initialise other variables
red_shifts = np.array(red_shift)
blue_shifts = np.array(blue_shift)
# create a grid that's smaller than the output grid, which we then interpolate from to get the output values
xrgrid = np.zeros((output_grid_size - 1, output_grid_size - 1))
xbgrid = np.zeros((output_grid_size - 1, output_grid_size - 1))
yrgrid = np.zeros((output_grid_size - 1, output_grid_size - 1))
ybgrid = np.zeros((output_grid_size - 1, output_grid_size - 1))
xrsgrid = []
xbsgrid = []
yrsgrid = []
ybsgrid = []
xg = np.zeros((output_grid_size - 1, output_grid_size - 1))
yg = np.zeros((output_grid_size - 1, output_grid_size - 1))
# Format the grids. numpy can't be used directly here: it wants a nice,
# uniformly spaced grid, which we can't guarantee at this point, hence the rather manual setup
for x in range(output_grid_size - 1):
xrsgrid.append([])
yrsgrid.append([])
xbsgrid.append([])
ybsgrid.append([])
for y in range(output_grid_size - 1):
xrsgrid[x].append([])
yrsgrid[x].append([])
xbsgrid[x].append([])
ybsgrid[x].append([])
image_size = (image_dimensions[0], image_dimensions[1])
gridxsize = image_size[0] / (output_grid_size - 1)
gridysize = image_size[1] / (output_grid_size - 1)
# Iterate through each dot and its shift values, and put these into the correct grid location
for red_shift in red_shifts:
xgridloc = int(red_shift[0] / gridxsize)
ygridloc = int(red_shift[1] / gridysize)
xrsgrid[xgridloc][ygridloc].append(red_shift[2])
yrsgrid[xgridloc][ygridloc].append(red_shift[3])
for blue_shift in blue_shifts:
xgridloc = int(blue_shift[0] / gridxsize)
ygridloc = int(blue_shift[1] / gridysize)
xbsgrid[xgridloc][ygridloc].append(blue_shift[2])
ybsgrid[xgridloc][ygridloc].append(blue_shift[3])
# Now calculate the average pixel shift for each square in the grid
for x in range(output_grid_size - 1):
for y in range(output_grid_size - 1):
xrgrid[x, y] = np.mean(xrsgrid[x][y])
yrgrid[x, y] = np.mean(yrsgrid[x][y])
xbgrid[x, y] = np.mean(xbsgrid[x][y])
ybgrid[x, y] = np.mean(ybsgrid[x][y])
# Next, we start to interpolate the central points of the grid that gets passed to the tuning file
input_grids = np.array([xrgrid, yrgrid, xbgrid, ybgrid])
output_grids = np.zeros((4, output_grid_size, output_grid_size))
# Interpolate the centre of the grid
output_grids[:, 1:-1, 1:-1] = (input_grids[:, 1:, :-1] + input_grids[:, 1:, 1:] + input_grids[:, :-1, 1:] + input_grids[:, :-1, :-1]) / 4
# Edge cases:
output_grids[:, 1:-1, 0] = ((input_grids[:, :-1, 0] + input_grids[:, 1:, 0]) / 2 - output_grids[:, 1:-1, 1]) * 2 + output_grids[:, 1:-1, 1]
# Note: index -1 rather than a hard-coded 7, so non-default grid sizes also work
output_grids[:, 1:-1, -1] = ((input_grids[:, :-1, -1] + input_grids[:, 1:, -1]) / 2 - output_grids[:, 1:-1, -2]) * 2 + output_grids[:, 1:-1, -2]
output_grids[:, 0, 1:-1] = ((input_grids[:, 0, :-1] + input_grids[:, 0, 1:]) / 2 - output_grids[:, 1, 1:-1]) * 2 + output_grids[:, 1, 1:-1]
output_grids[:, -1, 1:-1] = ((input_grids[:, -1, :-1] + input_grids[:, -1, 1:]) / 2 - output_grids[:, -2, 1:-1]) * 2 + output_grids[:, -2, 1:-1]
# Corner Cases:
output_grids[:, 0, 0] = (output_grids[:, 0, 1] - output_grids[:, 1, 1]) + (output_grids[:, 1, 0] - output_grids[:, 1, 1]) + output_grids[:, 1, 1]
output_grids[:, 0, -1] = (output_grids[:, 0, -2] - output_grids[:, 1, -2]) + (output_grids[:, 1, -1] - output_grids[:, 1, -2]) + output_grids[:, 1, -2]
output_grids[:, -1, 0] = (output_grids[:, -1, 1] - output_grids[:, -2, 1]) + (output_grids[:, -2, 0] - output_grids[:, -2, 1]) + output_grids[:, -2, 1]
output_grids[:, -1, -1] = (output_grids[:, -2, -1] - output_grids[:, -2, -2]) + (output_grids[:, -1, -2] - output_grids[:, -2, -2]) + output_grids[:, -2, -2]
# Below, we swap the x and the y coordinates, and also multiply by a factor of -1
# This is due to the PiSP (standard) dimensions being flipped in comparison to
# PIL image coordinate directions, hence why xr -> yr. Also, the shifts calculated are colour shifts,
# and the PiSP block asks for the values it should shift by (hence the * -1, to convert from colour shift to a pixel shift)
output_grid_yr, output_grid_xr, output_grid_yb, output_grid_xb = output_grids * -1
return output_grid_xr, output_grid_yr, output_grid_xb, output_grid_yb
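# A minimal sketch of how shifts_to_yaml can be exercised with synthetic data.
# The image size, the regular dot spacing and the constant pixel shifts below
# are illustrative assumptions, not part of the tuning tool itself.
def _synthetic_shifts_example(image_size=(1024, 768)):
    red, blue = [], []
    # Drop one fake dot into every cell of the 8x8 accumulation grid so that
    # no cell is left empty (an empty cell would average to NaN)
    for i in range(8):
        for j in range(8):
            x = (i + 0.5) * image_size[0] / 8
            y = (j + 0.5) * image_size[1] / 8
            red.append([x, y, 0.5, 0.25])    # [centre x, centre y, x shift, y shift]
            blue.append([x, y, -0.5, -0.25])
    return shifts_to_yaml(red, blue, image_size)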
def analyse_dot(dot, dot_location=[0, 0]):
# Scan through the dot, calculate the centroid of each colour channel by doing:
# pixel channel brightness * distance from top left corner
# Sum these, and divide by the sum of each channel's brightnesses to get a centroid for each channel
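# Worked example of this weighting for a single row [0, 10, 20]:
# np.dot(row, np.arange(3)) = 0*0 + 10*1 + 20*2 = 50 and the row sums to 30,
# so the brightness-weighted centroid sits at 50/30 ~= 1.67 pixels from the left.
# The per-channel shift reported below is (channel centroid - green centroid).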
red_channel = np.array(dot)[:, :, 0]
y_num_pixels = len(red_channel[0])
x_num_pixels = len(red_channel)
yred_weight = np.sum(np.dot(red_channel, np.arange(y_num_pixels)))
xred_weight = np.sum(np.dot(np.arange(x_num_pixels), red_channel))
red_sum = np.sum(red_channel)
green_channel = np.array(dot)[:, :, 1]
ygreen_weight = np.sum(np.dot(green_channel, np.arange(y_num_pixels)))
xgreen_weight = np.sum(np.dot(np.arange(x_num_pixels), green_channel))
green_sum = np.sum(green_channel)
blue_channel = np.array(dot)[:, :, 2]
yblue_weight = np.sum(np.dot(blue_channel, np.arange(y_num_pixels)))
xblue_weight = np.sum(np.dot(np.arange(x_num_pixels), blue_channel))
blue_sum = np.sum(blue_channel)
# We return this structure. It contains 2 arrays that contain:
# the locations of the dot center, along with the channel shifts in the x and y direction:
# [ [red_center_x, red_center_y, red_x_shift, red_y_shift], [blue_center_x, blue_center_y, blue_x_shift, blue_y_shift] ]
dot_centre = [int(dot_location[0]) + int(len(dot) / 2), int(dot_location[1]) + int(len(dot[0]) / 2)]
return [dot_centre + [xred_weight / red_sum - xgreen_weight / green_sum, yred_weight / red_sum - ygreen_weight / green_sum],
        dot_centre + [xblue_weight / blue_sum - xgreen_weight / green_sum, yblue_weight / blue_sum - ygreen_weight / green_sum]]
def cac(Cam):
filelist = Cam.imgs_cac
Cam.log += '\nCAC analysing files: {}'.format(str(filelist))
np.set_printoptions(precision=3)
np.set_printoptions(suppress=True)
# Create arrays to hold all the dots data and their colour offsets
red_shift = [] # Format is: [[Dot Center X, Dot Center Y, x shift, y shift]]
blue_shift = []
# Iterate through the files.
# Multiple files are recommended to average out the lens aberration through rotations
for file in filelist:
Cam.log += '\nCAC processing file'
print("\n Processing file")
# Read the raw RGB values
rgb = file.rgb
image_size = [file.h, file.w]  # Image size: height, width
# Ensure the RGB values are a (height, width, 3) array so they can be worked with easily
rgb_image = rgb.reshape((image_size[0], image_size[1], 3))
# Pass the RGB image through to the dots locating program
# Returns an array of the dots (colour rectangles around the dots), and an array of their locations
print("Finding dots")
Cam.log += '\nFinding dots'
dots, dots_locations = find_dots_locations(rgb_image)
# Now, analyse each dot. Work out the centroid of each colour channel, and use that to work out
# by how far the chromatic aberration has shifted each channel
Cam.log += '\nDots found: {}'.format(str(len(dots)))
print('Dots found: ' + str(len(dots)))
for dot, dot_location in zip(dots, dots_locations):
if len(dot) > 0:
if (dot_location[0] > 0) and (dot_location[1] > 0):
ret = analyse_dot(dot, dot_location)
red_shift.append(ret[0])
blue_shift.append(ret[1])
# Take our arrays of red shifts and locations, push them through to be interpolated into a 9x9 matrix
# for the CAC block to handle and then store these as a .json file to be added to the camera
# tuning file
print("\nCreating output grid")
Cam.log += '\nCreating output grid'
rx, ry, bx, by = shifts_to_yaml(red_shift, blue_shift, image_size)
print("CAC correction complete!")
Cam.log += '\nCAC correction complete!'
# Give the JSON dict back to the main ctt program
return {"strength": 1.0, "lut_rx": list(rx.round(2).reshape(81)), "lut_ry": list(ry.round(2).reshape(81)), "lut_bx": list(bx.round(2).reshape(81)), "lut_by": list(by.round(2).reshape(81))}

View File

@@ -0,0 +1,404 @@
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# camera tuning tool for CCM (colour correction matrix)
from ctt_image_load import *
from ctt_awb import get_alsc_patches
import colors
from scipy.optimize import minimize
from ctt_visualise import visualise_macbeth_chart
import numpy as np
"""
takes 8-bit macbeth chart values, degammas and returns 16 bit
"""
'''
This program has several options for deriving the color matrix.
The first is average. This minimises the average delta E across all patches of
the macbeth chart. Testing across all cameras yielded this as the most color
accurate and vivid. Other options are available, however.
Maximum minimises the maximum delta E of the patches. It iterates until
a minimum maximum is found (so that there is not one patch that deviates wildly).
This yields generally good results, but overall the colors are less accurate.
Have a fiddle with maximum and see what you think.
The final option allows you to select the patches to average across.
This means that you can bias certain patches, for instance if you want the
reds to be more accurate.
'''
matrix_selection_types = ["average", "maximum", "patches"]
typenum = 0 # select from array above, 0 = average, 1 = maximum, 2 = patches
test_patches = [1, 2, 5, 8, 9, 12, 14]
'''
Enter patches to test for. Can also be entered twice if you
would like twice as much bias on one patch.
'''
def degamma(x):
x = x / ((2 ** 8) - 1) # takes 255 and scales it down to one
x = np.where(x < 0.04045, x / 12.92, ((x + 0.055) / 1.055) ** 2.4)
x = x * ((2 ** 16) - 1) # takes one and scales up to 65535, 16 bit color
return x
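# Quick sanity check of degamma (values rounded): an 8-bit input of 128 scales
# to 128/255 ~= 0.502, which is above the 0.04045 linear cut-off, so
# ((0.502 + 0.055) / 1.055) ** 2.4 ~= 0.216, i.e. roughly 14100 out of 65535.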
def gamma(x):
# Take 3 long array of color values and gamma them
return [((colour / 255) ** (1 / 2.4) * 1.055 - 0.055) * 255 for colour in x]
"""
Finds colour correction matrices for a list of images
"""
def ccm(Cam, cal_cr_list, cal_cb_list, grid_size):
global matrix_selection_types, typenum
imgs = Cam.imgs
"""
standard macbeth chart colour values
"""
m_rgb = np.array([ # these are in RGB
[116, 81, 67], # dark skin
[199, 147, 129], # light skin
[91, 122, 156], # blue sky
[90, 108, 64], # foliage
[130, 128, 176], # blue flower
[92, 190, 172], # bluish green
[224, 124, 47], # orange
[68, 91, 170], # purplish blue
[198, 82, 97], # moderate red
[94, 58, 106], # purple
[159, 189, 63], # yellow green
[230, 162, 39], # orange yellow
[35, 63, 147], # blue
[67, 149, 74], # green
[180, 49, 57], # red
[238, 198, 20], # yellow
[193, 84, 151], # magenta
[0, 136, 170], # cyan (goes out of gamut)
[245, 245, 243], # white 9.5
[200, 202, 202], # neutral 8
[161, 163, 163], # neutral 6.5
[121, 121, 122], # neutral 5
[82, 84, 86], # neutral 3.5
[49, 49, 51] # black 2
])
"""
convert reference colours from srgb to rgb
"""
m_srgb = degamma(m_rgb) # now in 16 bit color.
# Produce array of LAB values for ideal color chart
m_lab = [colors.RGB_to_LAB(color / 256) for color in m_srgb]
"""
reorder reference values to match how patches are ordered
"""
m_srgb = np.array([m_srgb[i::6] for i in range(6)]).reshape((24, 3))
m_lab = np.array([m_lab[i::6] for i in range(6)]).reshape((24, 3))
m_rgb = np.array([m_rgb[i::6] for i in range(6)]).reshape((24, 3))
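# e.g. for the 6-column x 4-row chart this slicing produces the index order
# [0, 6, 12, 18, 1, 7, 13, 19, ...], i.e. the chart read down each column in
# turn, matching how the patches are extracted from the image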
"""
reformat alsc correction tables or set colour_cals to None if alsc is
deactivated
"""
if cal_cr_list is None:
colour_cals = None
else:
colour_cals = {}
for cr, cb in zip(cal_cr_list, cal_cb_list):
cr_tab = cr['table']
cb_tab = cb['table']
"""
normalise tables so min value is 1
"""
cr_tab = cr_tab / np.min(cr_tab)
cb_tab = cb_tab / np.min(cb_tab)
colour_cals[cr['ct']] = [cr_tab, cb_tab]
"""
for each image, perform awb and alsc corrections.
Then calculate the colour correction matrix for that image, recording the
ccm and the colour temperature.
"""
ccm_tab = {}
for Img in imgs:
Cam.log += '\nProcessing image: ' + Img.name
"""
get macbeth patches with alsc applied if alsc enabled.
Note: if alsc is disabled then colour_cals will be set to None and
the function will simply return the macbeth patches
"""
r, b, g = get_alsc_patches(Img, colour_cals, grey=False, grid_size=grid_size)
"""
do awb
Note: awb is done by measuring the macbeth chart in the image, rather
than from the awb calibration. This is done so the awb will be perfect
and the ccm matrices will be more accurate.
"""
r_greys, b_greys, g_greys = r[3::4], b[3::4], g[3::4]
r_g = np.mean(r_greys / g_greys)
b_g = np.mean(b_greys / g_greys)
r = r / r_g
b = b / b_g
"""
normalise brightness wrt reference macbeth colours and then average
each channel for each patch
"""
gain = np.mean(m_srgb) / np.mean((r, g, b))
Cam.log += '\nGain with respect to standard colours: {:.3f}'.format(gain)
r = np.mean(gain * r, axis=1)
b = np.mean(gain * b, axis=1)
g = np.mean(gain * g, axis=1)
"""
calculate ccm matrix
"""
# ==== All of the below should be in sRGB ====
ccm = do_ccm(r, g, b, m_srgb)
# This is the initial guess that our optimisation code works with.
original_ccm = ccm
r1 = ccm[0]
r2 = ccm[1]
g1 = ccm[3]
g2 = ccm[4]
b1 = ccm[6]
b2 = ccm[7]
'''
THE COLOR MATRIX LOOKS AS BELOW
[R1 R2 R3]   [R_in]   [R_out]
[G1 G2 G3] * [G_in] = [G_out]
[B1 B2 B3]   [B_in]   [B_out]
We optimise six of the nine elements and work out the third element of each
row using the constraint r3 = 1 - r1 - r2 (and likewise for g3 and b3)
'''
x0 = [r1, r2, g1, g2, b1, b2]
'''
We use our old CCM as the initial guess for the program to find the
optimised matrix
'''
result = minimize(guess, x0, args=(r, g, b, m_lab), tol=0.01)
'''
This produces a color matrix which has the lowest delta E possible,
based off the input data. Note it is impossible for this to reach
zero since the input data is imperfect
'''
Cam.log += ("\n \n Optimised Matrix Below: \n \n")
[r1, r2, g1, g2, b1, b2] = result.x
# The new, optimised color correction matrix values
optimised_ccm = [r1, r2, (1 - r1 - r2), g1, g2, (1 - g1 - g2), b1, b2, (1 - b1 - b2)]
# This is the optimised Color Matrix (preserving greys by summing rows up to 1)
Cam.log += str(optimised_ccm)
Cam.log += "\n Old Color Correction Matrix Below \n"
Cam.log += str(ccm)
formatted_ccm = np.array(original_ccm).reshape((3, 3))
'''
below is a whole load of code that then applies the latest color
matrix, and returns LAB values for color. This can then be used
to calculate the final delta E
'''
optimised_ccm_rgb = [] # Original Color Corrected Matrix RGB / LAB
optimised_ccm_lab = []
formatted_optimised_ccm = np.array(optimised_ccm).reshape((3, 3))
after_gamma_rgb = []
after_gamma_lab = []
for RGB in zip(r, g, b):
ccm_applied_rgb = np.dot(formatted_ccm, (np.array(RGB) / 256))
optimised_ccm_rgb.append(gamma(ccm_applied_rgb))
optimised_ccm_lab.append(colors.RGB_to_LAB(ccm_applied_rgb))
optimised_ccm_applied_rgb = np.dot(formatted_optimised_ccm, np.array(RGB) / 256)
after_gamma_rgb.append(gamma(optimised_ccm_applied_rgb))
after_gamma_lab.append(colors.RGB_to_LAB(optimised_ccm_applied_rgb))
'''
Gamma After RGB / LAB - not used in calculations, only used for visualisation
We now want to spit out some data that shows
how the optimisation has improved the color matrices
'''
Cam.log += "Here are the Improvements"
# CALCULATE WORST CASE delta e
old_worst_delta_e = 0
before_average = transform_and_evaluate(formatted_ccm, r, g, b, m_lab)
new_worst_delta_e = 0
after_average = transform_and_evaluate(formatted_optimised_ccm, r, g, b, m_lab)
for i in range(24):
old_delta_e = deltae(optimised_ccm_lab[i], m_lab[i]) # Current Old Delta E
new_delta_e = deltae(after_gamma_lab[i], m_lab[i]) # Current New Delta E
if old_delta_e > old_worst_delta_e:
old_worst_delta_e = old_delta_e
if new_delta_e > new_worst_delta_e:
new_worst_delta_e = new_delta_e
Cam.log += "Before color correction matrix was optimised, we got an average delta E of " + str(before_average) + " and a maximum delta E of " + str(old_worst_delta_e)
Cam.log += "After color correction matrix was optimised, we got an average delta E of " + str(after_average) + " and a maximum delta E of " + str(new_worst_delta_e)
visualise_macbeth_chart(m_rgb, optimised_ccm_rgb, after_gamma_rgb, str(Img.col) + str(matrix_selection_types[typenum]))
'''
The program will also save some visualisations of improvements.
Very pretty to look at. Top rectangle is ideal, Left square is
before optimisation, right square is after.
'''
"""
if a ccm has already been calculated for that temperature then don't
overwrite but save both. They will then be averaged later on
""" # Now going to use optimised color matrix, optimised_ccm
if Img.col in ccm_tab.keys():
ccm_tab[Img.col].append(optimised_ccm)
else:
ccm_tab[Img.col] = [optimised_ccm]
Cam.log += '\n'
Cam.log += '\nFinished processing images'
"""
average any ccms that share a colour temperature
"""
for k, v in ccm_tab.items():
tab = np.mean(v, axis=0)
tab = np.where((10000 * tab) % 1 <= 0.05, tab + 0.00001, tab)
tab = np.where((10000 * tab) % 1 >= 0.95, tab - 0.00001, tab)
ccm_tab[k] = list(np.round(tab, 5))
Cam.log += '\nMatrix calculated for colour temperature of {} K'.format(k)
"""
return all ccms with respective colour temperature in the correct format,
sorted by their colour temperature
"""
sorted_ccms = sorted(ccm_tab.items(), key=lambda kv: kv[0])
ccms = []
for i in sorted_ccms:
ccms.append({
'ct': i[0],
'ccm': i[1]
})
return ccms
def guess(x0, r, g, b, m_lab): # provides a method of numerical feedback for the optimisation code
[r1, r2, g1, g2, b1, b2] = x0
ccm = np.array([r1, r2, (1 - r1 - r2),
g1, g2, (1 - g1 - g2),
b1, b2, (1 - b1 - b2)]).reshape((3, 3)) # format the matrix correctly
return transform_and_evaluate(ccm, r, g, b, m_lab)
def transform_and_evaluate(ccm, r, g, b, m_lab): # Transforms colors to LAB and applies the correction matrix
# create list of matrix changed colors
realrgb = []
for RGB in zip(r, g, b):
rgb_post_ccm = np.dot(ccm, np.array(RGB) / 256) # This is RGB values after the color correction matrix has been applied
realrgb.append(colors.RGB_to_LAB(rgb_post_ccm))
# now compare that with m_lab and return numeric result, averaged for each patch
return (sumde(realrgb, m_lab) / 24) # returns an average result of delta E
def sumde(listA, listB):
global typenum, test_patches
sumde = 0
maxde = 0
patchde = [] # Create array of the delta E values for each patch. useful for optimisation of certain patches
for listA_item, listB_item in zip(listA, listB):
if maxde < (deltae(listA_item, listB_item)):
maxde = deltae(listA_item, listB_item)
patchde.append(deltae(listA_item, listB_item))
sumde += deltae(listA_item, listB_item)
'''
The different options specified at the start allow for
the maximum to be returned, average or specific patches
'''
if typenum == 0:
return sumde
if typenum == 1:
return maxde
if typenum == 2:
output = sum([patchde[test_patch] for test_patch in test_patches])
# Selects only certain patches and returns the output for them
return output
"""
calculates the ccm for an individual image.
ccms are calculated in rgb space, and are fit by hand. Although it is a 3x3
matrix, each row must add up to 1 in order to conserve greyness, simplifying
calculation.
The initial CCM is calculated in RGB, and then optimised in LAB color space
This simplifies the initial calculation but then gets us the accuracy of
using LAB color space.
"""
def do_ccm(r, g, b, m_srgb):
rb = r-b
gb = g-b
rb_2s = (rb * rb)
rb_gbs = (rb * gb)
gb_2s = (gb * gb)
r_rbs = rb * (m_srgb[..., 0] - b)
r_gbs = gb * (m_srgb[..., 0] - b)
g_rbs = rb * (m_srgb[..., 1] - b)
g_gbs = gb * (m_srgb[..., 1] - b)
b_rbs = rb * (m_srgb[..., 2] - b)
b_gbs = gb * (m_srgb[..., 2] - b)
"""
Obtain least squares fit
"""
rb_2 = np.sum(rb_2s)
gb_2 = np.sum(gb_2s)
rb_gb = np.sum(rb_gbs)
r_rb = np.sum(r_rbs)
r_gb = np.sum(r_gbs)
g_rb = np.sum(g_rbs)
g_gb = np.sum(g_gbs)
b_rb = np.sum(b_rbs)
b_gb = np.sum(b_gbs)
det = rb_2 * gb_2 - rb_gb * rb_gb
"""
Raise error if matrix is singular...
This shouldn't really happen with real data but if it does just take new
pictures and try again, not much else to be done unfortunately...
"""
if det < 0.001:
raise ArithmeticError
r_a = (gb_2 * r_rb - rb_gb * r_gb) / det
r_b = (rb_2 * r_gb - rb_gb * r_rb) / det
"""
Last row can be calculated by knowing the sum must be 1
"""
r_c = 1 - r_a - r_b
g_a = (gb_2 * g_rb - rb_gb * g_gb) / det
g_b = (rb_2 * g_gb - rb_gb * g_rb) / det
g_c = 1 - g_a - g_b
b_a = (gb_2 * b_rb - rb_gb * b_gb) / det
b_b = (rb_2 * b_gb - rb_gb * b_rb) / det
b_c = 1 - b_a - b_b
"""
format ccm
"""
ccm = [r_a, r_b, r_c, g_a, g_b, g_c, b_a, b_b, b_c]
return ccm
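# Sanity check: each row of ccm sums to (r_a + r_b + r_c) = 1 by construction,
# so a grey input (v, v, v) is mapped to (v, v, v) and greyness is preserved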
def deltae(colorA, colorB):
return ((colorA[0] - colorB[0]) ** 2 + (colorA[1] - colorB[1]) ** 2 + (colorA[2] - colorB[2]) ** 2) ** 0.5
# return ((colorA[1]-colorB[1]) ** 2 + (colorA[2]-colorB[2]) ** 2) ** 0.5
# UNCOMMENT IF YOU WANT TO NEGLECT LUMINANCE FROM CALCULATION OF DELTA E
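# Worked example: deltae([50, 0, 0], [53, 4, 0]) = (3**2 + 4**2) ** 0.5 = 5.0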

View File

@@ -0,0 +1,17 @@
{
"disable": [],
"plot": [],
"alsc": {
"do_alsc_colour": 1,
"luminance_strength": 0.8,
"max_gain": 8.0
},
"awb": {
"greyworld": 0
},
"blacklevel": -1,
"macbeth": {
"small": 0,
"show": 0
}
}

View File

@@ -0,0 +1,118 @@
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2023, Raspberry Pi Ltd
#
# find_dots.py - Used by CAC algorithm to convert image to set of dots
'''
This file takes the black and white version of the image, along with
the color version. It then locates the black dots on the image by
thresholding dark pixels.
In a rather fun way, the algorithm bounces around the thresholded area in a random path
We then use the maximum and minimum of these paths to determine the dot shape and size
This info is then used to return colored dots and locations back to the main file
'''
import numpy as np
import random
from PIL import Image, ImageEnhance, ImageFilter
def find_dots_locations(rgb_image, color_threshold=100, dots_edge_avoid=75, image_edge_avoid=10, search_path_length=500, grid_scan_step_size=10, logfile=None):  # logfile is unused here; a None default avoids opening a file at import time
# Initialise some starting variables
pixels = Image.fromarray(rgb_image)
pixels = pixels.convert("L")
enhancer = ImageEnhance.Contrast(pixels)
im_output = enhancer.enhance(1.4)
# We smooth it slightly to make it easier for the dot recognition program to locate the dots
im_output = im_output.filter(ImageFilter.GaussianBlur(radius=2))
bw_image = np.array(im_output)
location = [0, 0]
dots = []
dots_location = []
# the program avoids the image edges - we don't want a dot that is half a circle, as the
# centroids would all be wrong
for x in range(dots_edge_avoid, len(bw_image) - dots_edge_avoid, grid_scan_step_size):
for y in range(dots_edge_avoid, len(bw_image[0]) - dots_edge_avoid, grid_scan_step_size):
location = [x, y]
scrap_dot = False # A variable used to make sure that this is a valid dot
if (bw_image[location[0], location[1]] < color_threshold) and not (scrap_dot):
heading = "south" # Define a starting direction to move in
coords = []
for i in range(search_path_length): # Creates a path of length `search_path_length`. This turns out to always be enough to work out the rough shape of the dot.
# Now make sure that the thresholded area doesn't come within 10 pixels of the edge of the image, ensures we capture all the CA
if ((image_edge_avoid < location[0] < len(bw_image) - image_edge_avoid) and (image_edge_avoid < location[1] < len(bw_image[0]) - image_edge_avoid)) and not (scrap_dot):
if heading == "south":
if bw_image[location[0] + 1, location[1]] < color_threshold:
# Here, notice it does not go south, but actually goes southeast
# This is crucial in ensuring that we make our way around the majority of the dot
location[0] = location[0] + 1
location[1] = location[1] + 1
heading = "south"
else:
# This happens when we reach a thresholded edge. We now randomly change direction and keep searching
dir = random.randint(1, 2)
if dir == 1:
heading = "west"
if dir == 2:
heading = "east"
if heading == "east":
if bw_image[location[0], location[1] + 1] < color_threshold:
location[1] = location[1] + 1
heading = "east"
else:
dir = random.randint(1, 2)
if dir == 1:
heading = "north"
if dir == 2:
heading = "south"
if heading == "west":
if bw_image[location[0], location[1] - 1] < color_threshold:
location[1] = location[1] - 1
heading = "west"
else:
dir = random.randint(1, 2)
if dir == 1:
heading = "north"
if dir == 2:
heading = "south"
if heading == "north":
if bw_image[location[0] - 1, location[1]] < color_threshold:
location[0] = location[0] - 1
heading = "north"
else:
dir = random.randint(1, 2)
if dir == 1:
heading = "west"
if dir == 2:
heading = "east"
# Log where our particle travels across the dot
coords.append([location[0], location[1]])
else:
scrap_dot = True # We just don't have enough space around the dot, discard this one, and move on
if not scrap_dot:
# get the size of the path's bounding box surrounding the dot
x_coords = np.array(coords)[:, 0]
y_coords = np.array(coords)[:, 1]
hsquaresize = max(list(x_coords)) - min(list(x_coords))
vsquaresize = max(list(y_coords)) - min(list(y_coords))
# Create the bounding coordinates of the rectangle surrounding the dot
# The box is expanded by roughly half the dot size on each side to ensure we get all that color fringing
extra_space_factor = 0.45
top_left_x = (min(list(x_coords)) - int(hsquaresize * extra_space_factor))
btm_right_x = max(list(x_coords)) + int(hsquaresize * extra_space_factor)
top_left_y = (min(list(y_coords)) - int(vsquaresize * extra_space_factor))
btm_right_y = max(list(y_coords)) + int(vsquaresize * extra_space_factor)
# Overwrite the area of the dot to ensure we don't use it again
bw_image[top_left_x:btm_right_x, top_left_y:btm_right_y] = 255
# Add the color version of the dot to the list to send off, along with some coordinates.
dots.append(rgb_image[top_left_x:btm_right_x, top_left_y:btm_right_y])
dots_location.append([top_left_x, top_left_y])
else:
# Dot was too close to the image border to be useable
pass
return dots, dots_location
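# A minimal sketch of driving this module directly. 'dots_chart.png' is a
# hypothetical file name; any RGB photograph of the dots grid should work.
if __name__ == '__main__':
    rgb = np.array(Image.open('dots_chart.png').convert('RGB'))
    dots, locations = find_dots_locations(rgb)
    print('Found {} candidate dots'.format(len(dots)))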

View File

@@ -0,0 +1,181 @@
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# camera tuning tool for GEQ (green equalisation)
from ctt_tools import *
import matplotlib.pyplot as plt
import scipy.optimize as optimize
"""
Uses green differences in macbeth patches to fit green equalisation threshold
model. Ideally, all macbeth chart centres would fall below the threshold as
these should be corrected by geq.
"""
def geq_fit(Cam, plot):
imgs = Cam.imgs
"""
green equalisation to mitigate mazing.
Fits geq model by looking at difference
between greens in macbeth patches
"""
geqs = np.array([geq(Cam, Img)*Img.againQ8_norm for Img in imgs])
Cam.log += '\nProcessed all images'
geqs = geqs.reshape((-1, 2))
"""
data is sorted by green difference and top half is selected since higher
green difference data define the decision boundary.
"""
geqs = np.array(sorted(geqs, key=lambda r: np.abs((r[1]-r[0])/r[0])))
length = len(geqs)
g0 = geqs[length//2:, 0]
g1 = geqs[length//2:, 1]
gdiff = np.abs(g0-g1)
"""
find linear fit by minimising asymmetric least square errors
in order to cover most of the macbeth images.
the philosophy here is that every macbeth patch should fall within the
threshold, hence the upper bound approach
"""
def f(params):
m, c = params
a = gdiff - (m*g0+c)
"""
asymmetric square error returns:
1.95 * a**2 if a is positive
0.05 * a**2 if a is negative
"""
return(np.sum(a**2+0.95*np.abs(a)*a))
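# e.g. a = +1 scores 1 + 0.95 = 1.95 while a = -1 scores 1 - 0.95 = 0.05,
# so points above the fitted line are penalised ~40x more heavily, which
# pushes the line towards an upper bound of the data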
initial_guess = [0.01, 500]
"""
Nelder-Mead is usually not the most desirable optimisation method
but has been chosen here due to its robustness to non-differentiable
objectives
"""
result = optimize.minimize(f, initial_guess, method='Nelder-Mead')
"""
need to check if the fit worked correctly
"""
if result.success:
slope, offset = result.x
Cam.log += '\nFit result: slope = {:.5f} '.format(slope)
Cam.log += 'offset = {}'.format(int(offset))
"""
optional plotting code
"""
if plot:
x = np.linspace(0, max(g0)*1.1, 100)
y = slope*x + offset
plt.title('GEQ Asymmetric \'Upper Bound\' Fit')
plt.plot(x, y, color='red', ls='--', label='fit')
plt.scatter(g0, gdiff, color='b', label='data')
plt.ylabel('Difference in green channels')
plt.xlabel('Green value')
"""
This upper bound asymmetric gives correct order of magnitude values.
The pipeline approximates a 1st derivative of a gaussian with some
linear piecewise functions, introducing arbitrary cutoffs. For
pessimistic geq, the model parameters have been increased by a
scaling factor/constant.
Feel free to tune these or edit the json files directly if you
believe there are still mazing effects left (threshold too low) or if you
think it is being overcorrected (threshold too high).
We have gone for a one size fits most approach that will produce
acceptable results in most applications.
"""
slope *= 1.5
offset += 201
Cam.log += '\nFit after correction factors: slope = {:.5f}'.format(slope)
Cam.log += ' offset = {}'.format(int(offset))
"""
clamp offset at 0 due to pipeline considerations
"""
if offset < 0:
Cam.log += '\nOffset raised to 0'
offset = 0
"""
optional plotting code
"""
if plot:
y2 = slope*x + offset
plt.plot(x, y2, color='green', ls='--', label='scaled fit')
plt.grid()
plt.legend()
plt.show()
"""
the case where, for some reason, the fit didn't work correctly:
transpose the data and do a least squares linear fit instead. Transposing the
data makes it robust to many patches where the green difference is the same,
since they only contribute to one error minimisation, instead of dragging
the entire linear fit down.
"""
else:
print('\nError! Couldn\'t fit asymmetric least squares')
print(result.message)
Cam.log += '\nWARNING: Asymmetric least squares fit failed! '
Cam.log += 'Standard fit used could possibly lead to worse results'
fit = np.polyfit(gdiff, g0, 1)
offset, slope = -fit[1]/fit[0], 1/fit[0]
Cam.log += '\nFit result: slope = {:.5f} '.format(slope)
Cam.log += 'offset = {}'.format(int(offset))
"""
optional plotting code
"""
if plot:
x = np.linspace(0, max(g0)*1.1, 100)
y = slope*x + offset
plt.title('GEQ Linear Fit')
plt.plot(x, y, color='red', ls='--', label='fit')
plt.scatter(g0, gdiff, color='b', label='data')
plt.ylabel('Difference in green channels')
plt.xlabel('Green value')
"""
Scaling factors (see previous justification)
The model here will not be an upper bound so scaling factors have
been increased.
This method of deriving geq model parameters is extremely arbitrary
and undesirable.
"""
slope *= 2.5
offset += 301
Cam.log += '\nFit after correction factors: slope = {:.5f}'.format(slope)
Cam.log += ' offset = {}'.format(int(offset))
if offset < 0:
Cam.log += '\nOffset raised to 0'
offset = 0
"""
optional plotting code
"""
if plot:
y2 = slope*x + offset
plt.plot(x, y2, color='green', ls='--', label='scaled fit')
plt.legend()
plt.grid()
plt.show()
return round(slope, 5), int(offset)
""""
Return green channels of macbeth patches
returns g0, g1 where
> g0 is green next to red
> g1 is green next to blue
"""
def geq(Cam, Img):
Cam.log += '\nProcessing image {}'.format(Img.name)
patches = [Img.patches[i] for i in Img.order][1:3]
g_patches = np.array([(np.mean(patches[0][i]), np.mean(patches[1][i])) for i in range(24)])
Cam.log += '\n'
return(g_patches)

View File

@@ -0,0 +1,455 @@
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019-2020, Raspberry Pi Ltd
#
# camera tuning tool image loading
from ctt_tools import *
from ctt_macbeth_locator import *
import json
import pyexiv2 as pyexif
import rawpy as raw
"""
Image class load image from raw data and extracts metadata.
Once image is extracted from data, it finds 24 16x16 patches for each
channel, centred at the macbeth chart squares
"""
class Image:
def __init__(self, buf):
self.buf = buf
self.patches = None
self.saturated = False
'''
obtain metadata from buffer
'''
def get_meta(self):
self.ver = ba_to_b(self.buf[4:5])
self.w = ba_to_b(self.buf[0xd0:0xd2])
self.h = ba_to_b(self.buf[0xd2:0xd4])
self.pad = ba_to_b(self.buf[0xd4:0xd6])
self.fmt = self.buf[0xf5]
self.sigbits = 2*self.fmt + 4
self.pattern = self.buf[0xf4]
self.exposure = ba_to_b(self.buf[0x90:0x94])
self.againQ8 = ba_to_b(self.buf[0x94:0x96])
self.againQ8_norm = self.againQ8/256
camName = self.buf[0x10:0x10+128]
camName_end = camName.find(0x00)
self.camName = self.buf[0x10:0x10+128][:camName_end].decode()
"""
Channel order depending on bayer pattern
"""
bayer_case = {
0: (0, 1, 2, 3), # red
1: (2, 0, 3, 1), # green next to red
2: (3, 2, 1, 0), # green next to blue
3: (1, 0, 3, 2), # blue
128: (0, 1, 2, 3) # arbitrary order for the greyscale case
}
self.order = bayer_case[self.pattern]
'''
manual blacklevel - not robust
'''
if 'ov5647' in self.camName:
self.blacklevel = 16
else:
self.blacklevel = 64
self.blacklevel_16 = self.blacklevel << (6)
return 1
'''
print metadata for debug
'''
def print_meta(self):
print('\nData:')
print(' ver = {}'.format(self.ver))
print(' w = {}'.format(self.w))
print(' h = {}'.format(self.h))
print(' pad = {}'.format(self.pad))
print(' fmt = {}'.format(self.fmt))
print(' sigbits = {}'.format(self.sigbits))
print(' pattern = {}'.format(self.pattern))
print(' exposure = {}'.format(self.exposure))
print(' againQ8 = {}'.format(self.againQ8))
print(' againQ8_norm = {}'.format(self.againQ8_norm))
print(' camName = {}'.format(self.camName))
print(' blacklevel = {}'.format(self.blacklevel))
print(' blacklevel_16 = {}'.format(self.blacklevel_16))
return 1
"""
get image from raw scanline data
"""
def get_image(self, raw):
self.dptr = []
"""
check if data is 10 or 12 bits
"""
if self.sigbits == 10:
"""
calc length of scanline
"""
lin_len = ((((((self.w+self.pad+3)>>2)) * 5)+31)>>5) * 32
"""
stack scan lines into matrix
"""
raw = np.array(raw).reshape(-1, lin_len).astype(np.int64)[:self.h, ...]
"""
separate the 5 bytes in each packed group, stopping when w is satisfied
"""
ba0 = raw[..., 0:5*((self.w+3)>>2):5]
ba1 = raw[..., 1:5*((self.w+3)>>2):5]
ba2 = raw[..., 2:5*((self.w+3)>>2):5]
ba3 = raw[..., 3:5*((self.w+3)>>2):5]
ba4 = raw[..., 4:5*((self.w+3)>>2):5]
"""
assemble 10 bit numbers
"""
ch0 = np.left_shift((np.left_shift(ba0, 2) + (ba4 % 4)), 6)
ch1 = np.left_shift((np.left_shift(ba1, 2) + (np.right_shift(ba4, 2) % 4)), 6)
ch2 = np.left_shift((np.left_shift(ba2, 2) + (np.right_shift(ba4, 4) % 4)), 6)
ch3 = np.left_shift((np.left_shift(ba3, 2) + (np.right_shift(ba4, 6) % 4)), 6)
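# Worked example of the assembly above: each group of 5 bytes packs 4 pixels;
# ba0..ba3 hold the top 8 bits of each pixel and ba4 holds the four 2-bit
# remainders. If ba0 = 0xAB and its remainder bits are 0b01, the pixel becomes
# ((0xAB << 2) | 0b01) << 6 = 0x2AD << 6, the 10-bit value left-aligned to 16 bits.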
"""
re-interleave the four pixel streams into scanline order
"""
mat = np.empty((self.h, self.w), dtype=ch0.dtype)
mat[..., 0::4] = ch0
mat[..., 1::4] = ch1
mat[..., 2::4] = ch2
mat[..., 3::4] = ch3
"""
There is some memory leaking somewhere in the code. This code here
seemed to make things good enough that the code would run for
reasonable numbers of images, however this is technically just a
workaround. (sorry)
"""
ba0, ba1, ba2, ba3, ba4 = None, None, None, None, None
del ba0, ba1, ba2, ba3, ba4
ch0, ch1, ch2, ch3 = None, None, None, None
del ch0, ch1, ch2, ch3
"""
same as before but 12 bit case
"""
elif self.sigbits == 12:
lin_len = ((((((self.w+self.pad+1)>>1)) * 3)+31)>>5) * 32
raw = np.array(raw).reshape(-1, lin_len).astype(np.int64)[:self.h, ...]
ba0 = raw[..., 0:3*((self.w+1)>>1):3]
ba1 = raw[..., 1:3*((self.w+1)>>1):3]
ba2 = raw[..., 2:3*((self.w+1)>>1):3]
ch0 = np.left_shift((np.left_shift(ba0, 4) + ba2 % 16), 4)
ch1 = np.left_shift((np.left_shift(ba1, 4) + (np.right_shift(ba2, 4)) % 16), 4)
mat = np.empty((self.h, self.w), dtype=ch0.dtype)
mat[..., 0::2] = ch0
mat[..., 1::2] = ch1
else:
"""
data is neither 10 nor 12 bit, or the data is incorrect
"""
print('ERROR: wrong bit format, only 10 or 12 bit supported')
return 0
"""
separate bayer channels
"""
c0 = mat[0::2, 0::2]
c1 = mat[0::2, 1::2]
c2 = mat[1::2, 0::2]
c3 = mat[1::2, 1::2]
self.channels = [c0, c1, c2, c3]
return 1
"""
obtain 16x16 patch centred at macbeth square centre for each channel
"""
def get_patches(self, cen_coords, size=16):
"""
obtain channel widths and heights
"""
ch_w, ch_h = self.w, self.h
cen_coords = list(np.array((cen_coords[0])).astype(np.int32))
self.cen_coords = cen_coords
"""
squares are ordered by stacking macbeth chart columns from
left to right. Some useful patch indices:
white = 3
black = 23
'reds' = 9, 10
'blues' = 2, 5, 8, 20, 22
'greens' = 6, 12, 17
greyscale = 3, 7, 11, 15, 19, 23
"""
all_patches = []
for ch in self.channels:
ch_patches = []
for cen in cen_coords:
'''
macbeth centre is placed at top left of central 2x2 patch
to account for rounding
Patch pixels are sorted by pixel brightness so spatial
information is lost.
'''
patch = ch[cen[1]-7:cen[1]+9, cen[0]-7:cen[0]+9].flatten()
patch.sort()
if patch[-5] == (2**self.sigbits-1)*2**(16-self.sigbits):
self.saturated = True
ch_patches.append(patch)
# print('\nNew Patch\n')
all_patches.append(ch_patches)
# print('\n\nNew Channel\n\n')
self.patches = all_patches
return 1
def brcm_load_image(Cam, im_str):
"""
Load image where raw data and metadata is in the BRCM format
"""
try:
"""
create byte array
"""
with open(im_str, 'rb') as image:
f = image.read()
b = bytearray(f)
"""
return error if incorrect image address
"""
except FileNotFoundError:
print('\nERROR:\nInvalid image address')
Cam.log += '\nWARNING: Invalid image address'
return 0
"""
return error if problem reading file
"""
if f is None:
print('\nERROR:\nProblem reading file')
Cam.log += '\nWARNING: Problem reading file'
return 0
# print('\nLooking for EOI and BRCM header')
"""
find end of image followed by BRCM header by turning
bytearray into hex string and string matching with regexp
"""
start = -1
match = bytearray(b'\xff\xd9@BRCM')
match_str = binascii.hexlify(match)
b_str = binascii.hexlify(b)
"""
note the index is divided by two to go from the hex string offset back to a byte offset
"""
indices = [m.start()//2 for m in re.finditer(match_str, b_str)]
# print(indices)
try:
start = indices[0] + 3
except IndexError:
print('\nERROR:\nNo Broadcom header found')
Cam.log += '\nWARNING: No Broadcom header found!'
return 0
"""
extract data after header
"""
# print('\nExtracting data after header')
buf = b[start:start+32768]
Img = Image(buf)
Img.str = im_str
# print('Data found successfully')
"""
obtain metadata
"""
# print('\nReading metadata')
Img.get_meta()
Cam.log += '\nExposure : {} us'.format(Img.exposure)
Cam.log += '\nNormalised gain : {}'.format(Img.againQ8_norm)
# print('Metadata read successfully')
"""
obtain raw image data
"""
# print('\nObtaining raw image data')
raw = b[start+32768:]
Img.get_image(raw)
"""
delete raw to stop memory errors
"""
raw = None
del raw
# print('Raw image data obtained successfully')
return Img
def dng_load_image(Cam, im_str):
try:
Img = Image(None)
# RawPy doesn't load all the image tags that we need, so we use py3exiv2
metadata = pyexif.ImageMetadata(im_str)
metadata.read()
Img.ver = 100 # random value
"""
The DNG and TIFF/EP specifications use different IFDs to store the raw
image data and the Exif tags. DNG stores them in a SubIFD and in an Exif
IFD respectively (named "SubImage1" and "Photo" by pyexiv2), while
TIFF/EP stores them both in IFD0 (name "Image"). Both are used in "DNG"
files, with libcamera-apps following the DNG recommendation and
applications based on picamera2 following TIFF/EP.
This code detects which tags are being used, and therefore extracts the
correct values.
"""
try:
Img.w = metadata['Exif.SubImage1.ImageWidth'].value
subimage = "SubImage1"
photo = "Photo"
except KeyError:
Img.w = metadata['Exif.Image.ImageWidth'].value
subimage = "Image"
photo = "Image"
Img.pad = 0
Img.h = metadata[f'Exif.{subimage}.ImageLength'].value
white = metadata[f'Exif.{subimage}.WhiteLevel'].value
Img.sigbits = int(white).bit_length()
Img.fmt = (Img.sigbits - 4) // 2
Img.exposure = int(metadata[f'Exif.{photo}.ExposureTime'].value * 1000000)
Img.againQ8 = metadata[f'Exif.{photo}.ISOSpeedRatings'].value * 256 / 100
Img.againQ8_norm = Img.againQ8 / 256
Img.camName = metadata['Exif.Image.Model'].value
Img.blacklevel = int(metadata[f'Exif.{subimage}.BlackLevel'].value[0])
Img.blacklevel_16 = Img.blacklevel << (16 - Img.sigbits)
bayer_case = {
'0 1 1 2': (0, (0, 1, 2, 3)),
'1 2 0 1': (1, (2, 0, 3, 1)),
'2 1 1 0': (2, (3, 2, 1, 0)),
'1 0 2 1': (3, (1, 0, 3, 2))
}
cfa_pattern = metadata[f'Exif.{subimage}.CFAPattern'].value
Img.pattern = bayer_case[cfa_pattern][0]
Img.order = bayer_case[cfa_pattern][1]
# Now use RawPy to get the raw Bayer pixels
raw_im = raw.imread(im_str)
raw_data = raw_im.raw_image
shift = 16 - Img.sigbits
c0 = np.left_shift(raw_data[0::2, 0::2].astype(np.int64), shift)
c1 = np.left_shift(raw_data[0::2, 1::2].astype(np.int64), shift)
c2 = np.left_shift(raw_data[1::2, 0::2].astype(np.int64), shift)
c3 = np.left_shift(raw_data[1::2, 1::2].astype(np.int64), shift)
Img.channels = [c0, c1, c2, c3]
Img.rgb = raw_im.postprocess()
except Exception:
print("\nERROR: failed to load DNG file", im_str)
print("Either file does not exist or is incompatible")
Cam.log += '\nERROR: DNG file does not exist or is incompatible'
raise
return Img
'''
load image from file location and perform calibration
check correct filetype
mac boolean is true if image is expected to contain macbeth chart and false
if not (alsc images don't have macbeth charts)
'''
def load_image(Cam, im_str, mac_config=None, show=False, mac=True, show_meta=False):
"""
check image is correct filetype
"""
if '.jpg' in im_str or '.jpeg' in im_str or '.brcm' in im_str or '.dng' in im_str:
if '.dng' in im_str:
Img = dng_load_image(Cam, im_str)
else:
Img = brcm_load_image(Cam, im_str)
"""
handle errors smoothly if loading image failed
"""
if Img == 0:
return 0
if show_meta:
Img.print_meta()
if mac:
"""
find macbeth centres, discarding images that are too dark or light
"""
av_chan = (np.mean(np.array(Img.channels), axis=0)/(2**16))
av_val = np.mean(av_chan)
# print(av_val)
if av_val < Img.blacklevel_16/(2**16)+1/64:
macbeth = None
print('\nError: Image too dark!')
Cam.log += '\nWARNING: Image too dark!'
else:
macbeth = find_macbeth(Cam, av_chan, mac_config)
"""
if no macbeth found return error
"""
if macbeth is None:
print('\nERROR: No macbeth chart found')
return 0
mac_cen_coords = macbeth[1]
# print('\nMacbeth centres located successfully')
"""
obtain image patches
"""
# print('\nObtaining image patches')
Img.get_patches(mac_cen_coords)
if Img.saturated:
print('\nERROR: Macbeth patches have saturated')
Cam.log += '\nWARNING: Macbeth patches have saturated!'
return 0
"""
clear memory
"""
Img.buf = None
del Img.buf
# print('Image patches obtained successfully')
"""
optional debug
"""
if show and __name__ == '__main__':
copy = sum(Img.channels)/2**18
copy = np.reshape(copy, (Img.h//2, Img.w//2)).astype(np.float64)
copy, _ = reshape(copy, 800)
represent(copy)
return Img
"""
return error if incorrect filetype
"""
else:
# print('\nERROR:\nInvalid file extension')
return 0
"""
bytearray splice to number little endian
"""
def ba_to_b(b):
total = 0
for i in range(len(b)):
total += 256**i * b[i]
return total
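# Worked example: ba_to_b(bytearray(b'\x34\x12')) = 0x34 + 256 * 0x12 = 4660,
# i.e. 0x1234; on Python 3 this is equivalent to int.from_bytes(b, 'little').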

View File

@@ -0,0 +1,61 @@
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# camera tuning tool for lux level
from ctt_tools import *
"""
Find lux values from metadata and calculate Y
"""
def lux(Cam, Img):
shutter_speed = Img.exposure
gain = Img.againQ8_norm
aperture = 1
Cam.log += '\nShutter speed = {}'.format(shutter_speed)
Cam.log += '\nGain = {}'.format(gain)
Cam.log += '\nAperture = {}'.format(aperture)
patches = [Img.patches[i] for i in Img.order]
channels = [Img.channels[i] for i in Img.order]
return lux_calc(Cam, Img, patches, channels), shutter_speed, gain
"""
perform lux calibration on bayer channels
"""
def lux_calc(Cam, Img, patches, channels):
"""
find the mean of each color channel on the grey patches
"""
ap_r = np.mean(patches[0][3::4])
ap_g = (np.mean(patches[1][3::4])+np.mean(patches[2][3::4]))/2
ap_b = np.mean(patches[3][3::4])
Cam.log += '\nAverage channel values on grey patches:'
Cam.log += '\nRed = {:.0f} Green = {:.0f} Blue = {:.0f}'.format(ap_r, ap_g, ap_b)
# print(ap_r, ap_g, ap_b)
"""
calculate channel gains
"""
gr = ap_g/ap_r
gb = ap_g/ap_b
Cam.log += '\nChannel gains: Red = {:.3f} Blue = {:.3f}'.format(gr, gb)
"""
find the mean of each color channel over the entire image and scale by gain
note greens are averaged together (treated as one channel)
"""
a_r = np.mean(channels[0])*gr
a_g = (np.mean(channels[1])+np.mean(channels[2]))/2
a_b = np.mean(channels[3])*gb
Cam.log += '\nAverage channel values over entire image scaled by channel gains:'
Cam.log += '\nRed = {:.0f} Green = {:.0f} Blue = {:.0f}'.format(a_r, a_g, a_b)
# print(a_r, a_g, a_b)
"""
Calculate y with top row of yuv matrix
"""
y = 0.299*a_r + 0.587*a_g + 0.114*a_b
Cam.log += '\nY value calculated: {}'.format(int(y))
# print(y)
return int(y)
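# The 0.299 / 0.587 / 0.114 weights above are the luma (top) row of the
# Rec.601 RGB-to-YUV matrix. Sanity check: a neutral scene with
# a_r = a_g = a_b = 1000 gives y = (0.299 + 0.587 + 0.114) * 1000 = 1000.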

View File

@@ -0,0 +1,757 @@
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# camera tuning tool Macbeth chart locator
from ctt_ransac import *
from ctt_tools import *
import warnings
"""
NOTE: some custom functions have been used here to make the code more readable.
These are defined in tools.py if they are needed for reference.
"""
"""
Some inconsistencies between packages cause runtime warnings when running
the clustering algorithm. This catches these warnings so they don't flood the
output to the console
"""
def fxn():
warnings.warn("runtime", RuntimeWarning)
"""
Define the success message
"""
success_msg = 'Macbeth chart located successfully'
def find_macbeth(Cam, img, mac_config=(0, 0)):
small_chart, show = mac_config
print('Locating macbeth chart')
Cam.log += '\nLocating macbeth chart'
"""
catch the warnings
"""
warnings.simplefilter("ignore")
fxn()
"""
Reference macbeth chart is created that will be correlated with the located
macbeth chart guess to produce a confidence value for the match.
"""
ref = cv2.imread(Cam.path + 'ctt_ref.pgm', flags=cv2.IMREAD_GRAYSCALE)
ref_w = 120
ref_h = 80
rc1 = (0, 0)
rc2 = (0, ref_h)
rc3 = (ref_w, ref_h)
rc4 = (ref_w, 0)
ref_corns = np.array((rc1, rc2, rc3, rc4), np.float32)
ref_data = (ref, ref_w, ref_h, ref_corns)
"""
locate macbeth chart
"""
cor, mac, coords, msg = get_macbeth_chart(img, ref_data)
# Keep a list that will include this and any brightened up versions of
# the image for reuse.
all_images = [img]
"""
The following bits of code try to fix common problems using simple
techniques.
If, now or at any point, the best correlation is above 0.75, then
nothing more is tried, as this is a high enough confidence to ensure
reliable macbeth square centre placement.
"""
"""
brighten image 2x
"""
if cor < 0.75:
a = 2
img_br = cv2.convertScaleAbs(img, alpha=a, beta=0)
all_images.append(img_br)
cor_b, mac_b, coords_b, msg_b = get_macbeth_chart(img_br, ref_data)
if cor_b > cor:
cor, mac, coords, msg = cor_b, mac_b, coords_b, msg_b
"""
brighten image 4x
"""
if cor < 0.75:
a = 4
img_br = cv2.convertScaleAbs(img, alpha=a, beta=0)
all_images.append(img_br)
cor_b, mac_b, coords_b, msg_b = get_macbeth_chart(img_br, ref_data)
if cor_b > cor:
cor, mac, coords, msg = cor_b, mac_b, coords_b, msg_b
"""
In case macbeth chart is too small, take a selection of the image and
attempt to locate macbeth chart within that. The scale increment is
root 2
"""
"""
These variables will be used to transform the found coordinates at smaller
scales back into the original. If ii is still -1 after this section that
means it was not successful
"""
ii = -1
w_best = 0
h_best = 0
d_best = 100
"""
d_best records the scale of the best match. Macbeth charts are only looked
for at one scale increment smaller than the current best match in order to avoid
unnecessarily searching for macbeth charts at small scales.
If a macbeth chart has already been found then set d_best to 0
"""
if cor != 0:
d_best = 0
"""
scale 3/2 (approx root2)
"""
if cor < 0.75:
imgs = []
"""
get size of image
"""
shape = list(img.shape[:2])
w, h = shape
"""
set dimensions of the subselection and the step along each axis between
selections
"""
w_sel = int(2*w/3)
h_sel = int(2*h/3)
w_inc = int(w/6)
h_inc = int(h/6)
"""
for each subselection, look for a macbeth chart
loop over this and any brightened up images that we made to increase the
likelihood of success
"""
for img_br in all_images:
for i in range(3):
for j in range(3):
w_s, h_s = i*w_inc, j*h_inc
img_sel = img_br[w_s:w_s+w_sel, h_s:h_s+h_sel]
cor_ij, mac_ij, coords_ij, msg_ij = get_macbeth_chart(img_sel, ref_data)
"""
if the correlation is better than the best then record the
scale and current subselection at which macbeth chart was
found. Also record the coordinates, macbeth chart and message.
"""
if cor_ij > cor:
cor = cor_ij
mac, coords, msg = mac_ij, coords_ij, msg_ij
ii, jj = i, j
w_best, h_best = w_inc, h_inc
d_best = 1
"""
scale 2
"""
if cor < 0.75:
imgs = []
shape = list(img.shape[:2])
w, h = shape
w_sel = int(w/2)
h_sel = int(h/2)
w_inc = int(w/8)
h_inc = int(h/8)
# Again, loop over any brightened up images as well
for img_br in all_images:
for i in range(5):
for j in range(5):
w_s, h_s = i*w_inc, j*h_inc
img_sel = img_br[w_s:w_s+w_sel, h_s:h_s+h_sel]
cor_ij, mac_ij, coords_ij, msg_ij = get_macbeth_chart(img_sel, ref_data)
if cor_ij > cor:
cor = cor_ij
mac, coords, msg = mac_ij, coords_ij, msg_ij
ii, jj = i, j
w_best, h_best = w_inc, h_inc
d_best = 2
"""
The following code checks for macbeth charts at even smaller scales. This
slows the code down significantly and is therefore disabled by default,
however it is not unusably slow, so it might be useful if the macbeth chart
is too small to be picked up by the current subselections.
Use this for macbeth charts with side lengths around 1/5 of the image
dimensions (and smaller). It is, however, recommended that the macbeth chart
take up as large a proportion of the image as possible.
"""
if small_chart:
if cor < 0.75 and d_best > 1:
imgs = []
shape = list(img.shape[:2])
w, h = shape
w_sel = int(w/3)
h_sel = int(h/3)
w_inc = int(w/12)
h_inc = int(h/12)
for i in range(9):
for j in range(9):
w_s, h_s = i*w_inc, j*h_inc
img_sel = img[w_s:w_s+w_sel, h_s:h_s+h_sel]
cor_ij, mac_ij, coords_ij, msg_ij = get_macbeth_chart(img_sel, ref_data)
if cor_ij > cor:
cor = cor_ij
mac, coords, msg = mac_ij, coords_ij, msg_ij
ii, jj = i, j
w_best, h_best = w_inc, h_inc
d_best = 3
if cor < 0.75 and d_best > 2:
imgs = []
shape = list(img.shape[:2])
w, h = shape
w_sel = int(w/4)
h_sel = int(h/4)
w_inc = int(w/16)
h_inc = int(h/16)
for i in range(13):
for j in range(13):
w_s, h_s = i*w_inc, j*h_inc
img_sel = img[w_s:w_s+w_sel, h_s:h_s+h_sel]
cor_ij, mac_ij, coords_ij, msg_ij = get_macbeth_chart(img_sel, ref_data)
if cor_ij > cor:
cor = cor_ij
mac, coords, msg = mac_ij, coords_ij, msg_ij
ii, jj = i, j
w_best, h_best = w_inc, h_inc
"""
Transform coordinates from subselection to original image
"""
if ii != -1:
for a in range(len(coords)):
for b in range(len(coords[a][0])):
coords[a][0][b][1] += ii*w_best
coords[a][0][b][0] += jj*h_best
"""
initialise coords_fit variable
"""
coords_fit = None
# print('correlation: {}'.format(cor))
"""
print error or success message
"""
print(msg)
Cam.log += '\n' + str(msg)
if msg == success_msg:
coords_fit = coords
Cam.log += '\nMacbeth chart vertices:\n'
Cam.log += '{}'.format(2*np.round(coords_fit[0][0], 0))
"""
if correlation is lower than 0.75 there may be a risk of macbeth chart
corners not having been located properly. It might be worth running
with show set to true to check where the macbeth chart centres have
been located.
"""
print('Confidence: {:.3f}'.format(cor))
Cam.log += '\nConfidence: {:.3f}'.format(cor)
if cor < 0.75:
print('Caution: Low confidence guess!')
Cam.log += '\nWARNING: Low confidence guess!'
# cv2.imshow('MacBeth', mac)
# represent(mac, 'MacBeth chart')
"""
extract data from coords_fit and plot on original image
"""
if show and coords_fit is not None:
copy = img.copy()
verts = coords_fit[0][0]
cents = coords_fit[1][0]
"""
draw circles at vertices of macbeth chart
"""
for vert in verts:
p = tuple(np.round(vert).astype(np.int32))
cv2.circle(copy, p, 10, 1, -1)
"""
draw circles at centres of squares
"""
for i in range(len(cents)):
cent = cents[i]
p = tuple(np.round(cent).astype(np.int32))
"""
draw a black circle on the white square, a white circle on the black square and
a grey circle everywhere else.
"""
if i == 3:
cv2.circle(copy, p, 8, 0, -1)
elif i == 23:
cv2.circle(copy, p, 8, 1, -1)
else:
cv2.circle(copy, p, 8, 0.5, -1)
copy, _ = reshape(copy, 400)
represent(copy)
return(coords_fit)
def get_macbeth_chart(img, ref_data):
"""
function returns coordinates of macbeth chart vertices and square centres,
along with an error/success message for debugging purposes. Additionally,
it scores the match with a confidence value.
Brief explanation of the macbeth chart locating algorithm:
- Find rectangles within image
- Take rectangles within percentage offset of median perimeter. The
assumption is that these will be the macbeth squares
- For each potential square, find the 24 possible macbeth centre locations
that would produce a square in that location
- Find clusters of potential macbeth chart centres to find the potential
macbeth centres with the most votes, i.e. the most likely ones
- For each potential macbeth centre, use the centres of the squares that
voted for it to find macbeth chart corners
- For each set of corners, transform the possible match into normalised
space and correlate with a reference chart to evaluate the match
- Select the highest correlation as the macbeth chart match, returning the
correlation as the confidence score
"""
"""
get reference macbeth chart data
"""
(ref, ref_w, ref_h, ref_corns) = ref_data
"""
the code will raise and catch a MacbethError in case of a problem, trying
to give some likely reasons why the problem occurred, hence the try/except
"""
try:
"""
obtain image, convert to grayscale and normalise
"""
src = img
src, factor = reshape(src, 200)
original = src.copy()
a = 125/np.average(src)
src_norm = cv2.convertScaleAbs(src, alpha=a, beta=0)
"""
This code checks if there are separate colour channels. In the past the
macbeth locator ran on jpgs and this makes it robust to different
filetypes. Note that running it on a jpg has 4x the pixels of the
average bayer channel so coordinates must be doubled.
This is best done in img_load.py in the get_patches method. The
coordinates and image width, height must be divided by two if the
macbeth locator has been run on a demosaicked image.
"""
if len(src_norm.shape) == 3:
src_bw = cv2.cvtColor(src_norm, cv2.COLOR_BGR2GRAY)
else:
src_bw = src_norm
original_bw = src_bw.copy()
"""
obtain image edges
"""
sigma = 2
src_bw = cv2.GaussianBlur(src_bw, (0, 0), sigma)
t1, t2 = 50, 100
edges = cv2.Canny(src_bw, t1, t2)
"""
dilate edges to prevent self-intersections in contours
"""
k_size = 2
kernel = np.ones((k_size, k_size))
its = 1
edges = cv2.dilate(edges, kernel, iterations=its)
"""
find Contours in image
"""
conts, _ = cv2.findContours(edges, cv2.RETR_TREE,
cv2.CHAIN_APPROX_NONE)
if len(conts) == 0:
raise MacbethError(
'\nWARNING: No macbeth chart found!'
'\nNo contours found in image\n'
'Possible problems:\n'
'- Macbeth chart is too dark or bright\n'
'- Macbeth chart is occluded\n'
)
"""
find quadrilateral contours
"""
epsilon = 0.07
conts_per = []
for i in range(len(conts)):
per = cv2.arcLength(conts[i], True)
poly = cv2.approxPolyDP(conts[i], epsilon*per, True)
if len(poly) == 4 and cv2.isContourConvex(poly):
conts_per.append((poly, per))
if len(conts_per) == 0:
raise MacbethError(
'\nWARNING: No macbeth chart found!'
'\nNo quadrilateral contours found'
'\nPossible problems:\n'
'- Macbeth chart is too dark or bright\n'
'- Macbeth chart is occluded\n'
'- Macbeth chart is out of camera plane\n'
)
"""
sort contours by perimeter and get perimeters within percent of median
"""
conts_per = sorted(conts_per, key=lambda x: x[1])
med_per = conts_per[int(len(conts_per)/2)][1]
side = med_per/4
perc = 0.1
med_low, med_high = med_per*(1-perc), med_per*(1+perc)
squares = []
for i in conts_per:
if med_low <= i[1] <= med_high:
squares.append(i[0])
"""
obtain coordinates of the normalised macbeth chart and squares
"""
square_verts, mac_norm = get_square_verts(0.06)
"""
for each square guess, find 24 possible macbeth chart centres
"""
mac_mids = []
squares_raw = []
for i in range(len(squares)):
square = squares[i]
squares_raw.append(square)
"""
convert quads to rotated rectangles. This is required as the
'squares' are usually quite irregular quadrilaterals, so performing
a transform would result in exaggerated warping and inaccurate
macbeth chart centre placement
"""
rect = cv2.minAreaRect(square)
square = cv2.boxPoints(rect).astype(np.float32)
"""
reorder vertices to prevent 'hourglass shape'
"""
square = sorted(square, key=lambda x: x[0])
square_1 = sorted(square[:2], key=lambda x: x[1])
square_2 = sorted(square[2:], key=lambda x: -x[1])
square = np.array(np.concatenate((square_1, square_2)), np.float32)
square = np.reshape(square, (4, 2)).astype(np.float32)
squares[i] = square
"""
find 24 possible macbeth chart centres by transforming normalised
macbeth square vertices onto candidate square vertices found in image
"""
for j in range(len(square_verts)):
verts = square_verts[j]
p_mat = cv2.getPerspectiveTransform(verts, square)
mac_guess = cv2.perspectiveTransform(mac_norm, p_mat)
mac_guess = np.round(mac_guess).astype(np.int32)
"""
keep only if candidate macbeth is within image border
(deprecated)
"""
in_border = True
# for p in mac_guess[0]:
# pptest = cv2.pointPolygonTest(
# img_con,
# tuple(p),
# False
# )
# if pptest == -1:
# in_border = False
# break
if in_border:
mac_mid = np.mean(mac_guess, axis=1)
mac_mids.append([mac_mid, (i, j)])
if len(mac_mids) == 0:
raise MacbethError(
'\nWARNING: No macbeth chart found!'
'\nNo possible macbeth charts found within image'
'\nPossible problems:\n'
'- Part of the macbeth chart is outside the image\n'
'- Quadrilaterals in image background\n'
)
"""
reshape data
"""
for i in range(len(mac_mids)):
mac_mids[i][0] = mac_mids[i][0][0]
"""
find where midpoints cluster to identify most likely macbeth centres
"""
clustering = cluster.AgglomerativeClustering(
n_clusters=None,
compute_full_tree=True,
distance_threshold=side*2
)
mac_mids_list = [x[0] for x in mac_mids]
if len(mac_mids_list) == 1:
"""
special case of only one valid centre found (probably not needed)
"""
clus_list = []
clus_list.append([mac_mids, len(mac_mids)])
else:
clustering.fit(mac_mids_list)
# try:
# clustering.fit(mac_mids_list)
# except RuntimeWarning as error:
# return(0, None, None, error)
"""
create list of all clusters
"""
clus_list = []
if clustering.n_clusters_ > 1:
for i in range(clustering.labels_.max()+1):
indices = [j for j, x in enumerate(clustering.labels_) if x == i]
clus = []
for index in indices:
clus.append(mac_mids[index])
clus_list.append([clus, len(clus)])
clus_list.sort(key=lambda x: -x[1])
elif clustering.n_clusters_ == 1:
"""
special case of only one cluster found
"""
# print('only 1 cluster')
clus_list.append([mac_mids, len(mac_mids)])
else:
raise MacbethError(
'\nWARNING: No macbeth chart found!'
'\nNo clusters found'
'\nPossible problems:\n'
'- NA\n'
)
"""
keep only clusters with enough votes
"""
clus_len_max = clus_list[0][1]
clus_tol = 0.7
for i in range(len(clus_list)):
if clus_list[i][1] < clus_len_max * clus_tol:
clus_list = clus_list[:i]
break
cent = np.mean(clus_list[i][0], axis=0)[0]
clus_list[i].append(cent)
"""
represent most popular cluster centroids
"""
# copy = original_bw.copy()
# copy = cv2.cvtColor(copy, cv2.COLOR_GRAY2RGB)
# copy = cv2.resize(copy, None, fx=2, fy=2)
# for clus in clus_list:
# centroid = tuple(2*np.round(clus[2]).astype(np.int32))
# cv2.circle(copy, centroid, 7, (255, 0, 0), -1)
# cv2.circle(copy, centroid, 2, (0, 0, 255), -1)
# represent(copy)
"""
get centres of each normalised square
"""
reference = get_square_centres(0.06)
"""
for each possible macbeth chart, transform image into
normalised space and find correlation with reference
"""
max_cor = 0
best_map = None
best_fit = None
best_cen_fit = None
best_ref_mat = None
for clus in clus_list:
clus = clus[0]
sq_cents = []
ref_cents = []
i_list = [p[1][0] for p in clus]
for point in clus:
i, j = point[1]
"""
remove any square that voted for two different points within
the same cluster. This causes the same point in the image to be
mapped to two different reference square centres, resulting in
a very distorted perspective transform since cv2.findHomography
simply minimises error.
This phenomenon is not particularly likely to occur due to the
enforced distance threshold in the clustering fit but it is
best to keep this in just in case.
"""
if i_list.count(i) == 1:
square = squares_raw[i]
sq_cent = np.mean(square, axis=0)
ref_cent = reference[j]
sq_cents.append(sq_cent)
ref_cents.append(ref_cent)
"""
At least four squares need to have voted for a centre in
order for a transform to be found
"""
if len(sq_cents) < 4:
raise MacbethError(
'\nWARNING: No macbeth chart found!'
'\nNot enough squares found'
'\nPossible problems:\n'
'- Macbeth chart is occluded\n'
'- Macbeth chart is too dark or bright\n'
)
ref_cents = np.array(ref_cents)
sq_cents = np.array(sq_cents)
"""
find best fit transform from normalised centres to image
"""
h_mat, mask = cv2.findHomography(ref_cents, sq_cents)
if h_mat is None:
raise MacbethError(
'\nERROR\n'
)
"""
transform normalised corners and centres into image space
"""
mac_fit = cv2.perspectiveTransform(mac_norm, h_mat)
mac_cen_fit = cv2.perspectiveTransform(np.array([reference]), h_mat)
"""
transform located corners into reference space
"""
ref_mat = cv2.getPerspectiveTransform(
mac_fit,
np.array([ref_corns])
)
map_to_ref = cv2.warpPerspective(
original_bw, ref_mat,
(ref_w, ref_h)
)
"""
normalise brightness
"""
a = 125/np.average(map_to_ref)
map_to_ref = cv2.convertScaleAbs(map_to_ref, alpha=a, beta=0)
"""
find correlation with bw reference macbeth
"""
cor = correlate(map_to_ref, ref)
"""
keep only if best correlation
"""
if cor > max_cor:
max_cor = cor
best_map = map_to_ref
best_fit = mac_fit
best_cen_fit = mac_cen_fit
best_ref_mat = ref_mat
"""
rotate macbeth by pi and recorrelate in case macbeth chart is
upside-down
"""
mac_fit_inv = np.array(
([[mac_fit[0][2], mac_fit[0][3],
mac_fit[0][0], mac_fit[0][1]]])
)
mac_cen_fit_inv = np.flip(mac_cen_fit, axis=1)
ref_mat = cv2.getPerspectiveTransform(
mac_fit_inv,
np.array([ref_corns])
)
map_to_ref = cv2.warpPerspective(
original_bw, ref_mat,
(ref_w, ref_h)
)
a = 125/np.average(map_to_ref)
map_to_ref = cv2.convertScaleAbs(map_to_ref, alpha=a, beta=0)
cor = correlate(map_to_ref, ref)
if cor > max_cor:
max_cor = cor
best_map = map_to_ref
best_fit = mac_fit_inv
best_cen_fit = mac_cen_fit_inv
best_ref_mat = ref_mat
"""
Check best match is above threshold
"""
cor_thresh = 0.6
if max_cor < cor_thresh:
raise MacbethError(
'\nWARNING: Correlation too low'
'\nPossible problems:\n'
'- Bad lighting conditions\n'
'- Macbeth chart is occluded\n'
'- Background is too noisy\n'
'- Macbeth chart is out of camera plane\n'
)
"""
The following code is mostly visual representation for debugging purposes
"""
"""
draw macbeth corners and centres on image
"""
copy = cv2.resize(original, None, fx=2, fy=2)
# print('correlation = {}'.format(round(max_cor, 2)))
for point in best_fit[0]:
point = np.array(point, np.float32)
point = tuple(2*np.round(point).astype(np.int32))
cv2.circle(copy, point, 4, (255, 0, 0), -1)
for point in best_cen_fit[0]:
point = np.array(point, np.float32)
point = tuple(2*np.round(point).astype(np.int32))
cv2.circle(copy, point, 4, (0, 0, 255), -1)
"""
represent coloured macbeth in reference space
"""
best_map_col = cv2.warpPerspective(
original, best_ref_mat, (ref_w, ref_h)
)
best_map_col = cv2.resize(
best_map_col, None, fx=4, fy=4
)
a = 125/np.average(best_map_col)
best_map_col_norm = cv2.convertScaleAbs(
best_map_col, alpha=a, beta=0
)
# cv2.imshow('Macbeth', best_map_col)
# represent(copy)
"""
rescale coordinates to original image size
"""
fit_coords = (best_fit/factor, best_cen_fit/factor)
return(max_cor, best_map_col_norm, fit_coords, success_msg)
"""
catch macbeth errors and continue with code
"""
except MacbethError as error:
return(0, None, None, error)
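"""
a sketch of how a caller might consume the values returned above (the
locator's enclosing function and variable names are assumed here purely
for illustration):

cor, mac_map, fit_coords, msg = get_macbeth_chart(img, ref_data)
if cor == 0:
    print(msg)  # the caught MacbethError explaining the failure
else:
    best_fit, best_cen_fit = fit_coords  # corners/centres at original scale
"""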

View File

@@ -0,0 +1,123 @@
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# camera tuning tool noise calibration
from ctt_image_load import *
import matplotlib.pyplot as plt
"""
Find noise standard deviation and fit to model:
noise std = a + b*sqrt(pixel mean)
"""
def noise(Cam, Img, plot):
Cam.log += '\nProcessing image: {}'.format(Img.name)
stds = []
means = []
"""
iterate through macbeth square patches
"""
for ch_patches in Img.patches:
for patch in ch_patches:
"""
renormalise patch
"""
patch = np.array(patch)
patch = (patch-Img.blacklevel_16)/Img.againQ8_norm
std = np.std(patch)
mean = np.mean(patch)
stds.append(std)
means.append(mean)
"""
clean data and ensure all means are above 0
"""
stds = np.array(stds)
means = np.array(means)
means = np.clip(np.array(means), 0, None)
sq_means = np.sqrt(means)
"""
least squares fit model
"""
fit = np.polyfit(sq_means, stds, 1)
Cam.log += '\nBlack level = {}'.format(Img.blacklevel_16)
Cam.log += '\nNoise profile: offset = {}'.format(int(fit[1]))
Cam.log += ' slope = {:.3f}'.format(fit[0])
"""
remove any values further than std from the fit
anomalies most likely caused by:
> uncharacteristically noisy white patch
> saturation in the white patch
"""
fit_score = np.abs(stds - fit[0]*sq_means - fit[1])
fit_std = np.std(stds)
fit_score_norm = fit_score - fit_std
anom_ind = np.where(fit_score_norm > 1)
fit_score_norm.sort()
sq_means_clean = np.delete(sq_means, anom_ind)
stds_clean = np.delete(stds, anom_ind)
removed = len(stds) - len(stds_clean)
if removed != 0:
Cam.log += '\nIdentified and removed {} anomalies.'.format(removed)
Cam.log += '\nRecalculating fit'
"""
recalculate fit with outliers removed
"""
fit = np.polyfit(sq_means_clean, stds_clean, 1)
Cam.log += '\nNoise profile: offset = {}'.format(int(fit[1]))
Cam.log += ' slope = {:.3f}'.format(fit[0])
"""
if fit const is < 0 then force through 0 by
dividing by sq_means and fitting poly order 0
"""
corrected = 0
if fit[1] < 0:
corrected = 1
ones = np.ones(len(means))
y_data = stds/sq_means
fit2 = np.polyfit(ones, y_data, 0)
Cam.log += '\nOffset below zero. Fit recalculated with zero offset'
Cam.log += '\nNoise profile: offset = 0'
Cam.log += ' slope = {:.3f}'.format(fit2[0])
# print('new fit')
# print(fit2)
"""
plot fit for debug
"""
if plot:
x = np.arange(sq_means.max()//0.88)
fit_plot = x*fit[0] + fit[1]
plt.scatter(sq_means, stds, label='data', color='blue')
plt.scatter(sq_means[anom_ind], stds[anom_ind], color='orange', label='anomalies')
plt.plot(x, fit_plot, label='fit', color='red', ls=':')
if fit[1] < 0:
fit_plot_2 = x*fit2[0]
plt.plot(x, fit_plot_2, label='fit 0 intercept', color='green', ls='--')
plt.plot(0, 0)
plt.title('Noise Plot\nImg: {}'.format(Img.str))
plt.legend(loc='upper left')
plt.xlabel('Sqrt Pixel Value')
plt.ylabel('Noise Standard Deviation')
plt.grid()
plt.show()
"""
End of plotting code
"""
"""
format output to include forced 0 constant
"""
Cam.log += '\n'
if corrected:
fit = [fit2[0], 0]
return fit
else:
return fit
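"""
a minimal, self-contained sketch (not part of the tuning flow) of the model
fitted above, run on synthetic data with illustrative values only:
noise std = offset + slope*sqrt(pixel mean)
"""
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    true_slope, true_offset = 1.5, 8.0
    fake_means = rng.uniform(0, 4000, 100)
    fake_stds = true_offset + true_slope*np.sqrt(fake_means) + rng.normal(0, 0.5, 100)
    demo_fit = np.polyfit(np.sqrt(fake_means), fake_stds, 1)
    print('slope = {:.3f}, offset = {:.3f}'.format(demo_fit[0], demo_fit[1]))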

View File

@@ -0,0 +1,805 @@
#!/usr/bin/env python3
#
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# ctt_pisp.py - camera tuning tool data for PiSP platforms
json_template = {
"rpi.black_level": {
"black_level": 4096
},
"rpi.lux": {
"reference_shutter_speed": 10000,
"reference_gain": 1,
"reference_aperture": 1.0
},
"rpi.dpc": {
"strength": 1
},
"rpi.noise": {
},
"rpi.geq": {
},
"rpi.denoise":
{
"normal":
{
"sdn":
{
"deviation": 1.6,
"strength": 0.5,
"deviation2": 3.2,
"deviation_no_tdn": 3.2,
"strength_no_tdn": 0.75
},
"cdn":
{
"deviation": 200,
"strength": 0.3
},
"tdn":
{
"deviation": 0.8,
"threshold": 0.05
}
},
"hdr":
{
"sdn":
{
"deviation": 1.6,
"strength": 0.5,
"deviation2": 3.2,
"deviation_no_tdn": 3.2,
"strength_no_tdn": 0.75
},
"cdn":
{
"deviation": 200,
"strength": 0.3
},
"tdn":
{
"deviation": 1.3,
"threshold": 0.1
}
},
"night":
{
"sdn":
{
"deviation": 1.6,
"strength": 0.5,
"deviation2": 3.2,
"deviation_no_tdn": 3.2,
"strength_no_tdn": 0.75
},
"cdn":
{
"deviation": 200,
"strength": 0.3
},
"tdn":
{
"deviation": 1.3,
"threshold": 0.1
}
}
},
"rpi.awb": {
"priors": [
{"lux": 0, "prior": [2000, 1.0, 3000, 0.0, 13000, 0.0]},
{"lux": 800, "prior": [2000, 0.0, 6000, 2.0, 13000, 2.0]},
{"lux": 1500, "prior": [2000, 0.0, 4000, 1.0, 6000, 6.0, 6500, 7.0, 7000, 1.0, 13000, 1.0]}
],
"modes": {
"auto": {"lo": 2500, "hi": 7700},
"incandescent": {"lo": 2500, "hi": 3000},
"tungsten": {"lo": 3000, "hi": 3500},
"fluorescent": {"lo": 4000, "hi": 4700},
"indoor": {"lo": 3000, "hi": 5000},
"daylight": {"lo": 5500, "hi": 6500},
"cloudy": {"lo": 7000, "hi": 8000}
},
"bayes": 1
},
"rpi.agc":
{
"channels":
[
{
"comment": "Channel 0 is normal AGC",
"metering_modes":
{
"centre-weighted":
{
"weights":
[
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
1, 1, 2, 2, 3, 3, 4, 4, 4, 3, 3, 2, 2, 1, 1,
1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0
]
},
"spot":
{
"weights":
[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 2, 3, 2, 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
]
},
"matrix":
{
"weights":
[
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
]
}
},
"exposure_modes":
{
"normal":
{
"shutter": [ 100, 10000, 30000, 60000, 66666 ],
"gain": [ 1.0, 1.5, 2.0, 4.0, 8.0 ]
},
"short":
{
"shutter": [ 100, 5000, 10000, 20000, 60000 ],
"gain": [ 1.0, 1.5, 2.0, 4.0, 8.0 ]
},
"long":
{
"shutter": [ 100, 10000, 30000, 60000, 90000, 120000 ],
"gain": [ 1.0, 1.5, 2.0, 4.0, 8.0, 12.0 ]
}
},
"constraint_modes":
{
"normal": [
{
"bound": "LOWER",
"q_lo": 0.98,
"q_hi": 1.0,
"y_target":
[
0, 0.5,
1000, 0.5
]
}
],
"highlight": [
{
"bound": "LOWER",
"q_lo": 0.98,
"q_hi": 1.0,
"y_target":
[
0, 0.5,
1000, 0.5
]
},
{
"bound": "UPPER",
"q_lo": 0.98,
"q_hi": 1.0,
"y_target":
[
0, 0.8,
1000, 0.8
]
},
],
"shadows": [
{
"bound": "LOWER",
"q_lo": 0.0,
"q_hi": 0.5,
"y_target":
[
0, 0.17,
1000, 0.17
]
}
]
},
"y_target":
[
0, 0.16,
1000, 0.165,
10000, 0.17
]
},
{
"comment": "Channel 1 is the HDR short channel",
"desaturate": 0,
"metering_modes":
{
"centre-weighted":
{
"weights":
[
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
1, 1, 2, 2, 3, 3, 4, 4, 4, 3, 3, 2, 2, 1, 1,
1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0
]
},
"spot":
{
"weights":
[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 2, 3, 2, 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
]
},
"matrix":
{
"weights":
[
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
]
}
},
"exposure_modes":
{
"normal":
{
"shutter": [ 100, 20000, 60000 ],
"gain": [ 1.0, 1.0, 1.0 ]
},
"short":
{
"shutter": [ 100, 20000, 60000 ],
"gain": [ 1.0, 1.0, 1.0 ]
},
"long":
{
"shutter": [ 100, 20000, 60000 ],
"gain": [ 1.0, 1.0, 1.0 ]
}
},
"constraint_modes":
{
"normal": [
{
"bound": "LOWER",
"q_lo": 0.95,
"q_hi": 1.0,
"y_target":
[
0, 0.5,
1000, 0.5
]
},
{
"bound": "UPPER",
"q_lo": 0.95,
"q_hi": 1.0,
"y_target":
[
0, 0.7,
1000, 0.7
]
},
{
"bound": "LOWER",
"q_lo": 0.0,
"q_hi": 0.2,
"y_target":
[
0, 0.002,
1000, 0.002
]
}
],
"highlight": [
{
"bound": "LOWER",
"q_lo": 0.95,
"q_hi": 1.0,
"y_target":
[
0, 0.5,
1000, 0.5
]
},
{
"bound": "UPPER",
"q_lo": 0.95,
"q_hi": 1.0,
"y_target":
[
0, 0.7,
1000, 0.7
]
},
{
"bound": "LOWER",
"q_lo": 0.0,
"q_hi": 0.2,
"y_target":
[
0, 0.002,
1000, 0.002
]
}
],
"shadows": [
{
"bound": "LOWER",
"q_lo": 0.95,
"q_hi": 1.0,
"y_target":
[
0, 0.5,
1000, 0.5
]
},
{
"bound": "UPPER",
"q_lo": 0.95,
"q_hi": 1.0,
"y_target":
[
0, 0.7,
1000, 0.7
]
},
{
"bound": "LOWER",
"q_lo": 0.0,
"q_hi": 0.2,
"y_target":
[
0, 0.002,
1000, 0.002
]
}
]
},
"y_target":
[
0, 0.16,
1000, 0.165,
10000, 0.17
]
},
{
"comment": "Channel 2 is the HDR long channel",
"desaturate": 0,
"metering_modes":
{
"centre-weighted":
{
"weights":
[
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
1, 1, 2, 2, 3, 3, 4, 4, 4, 3, 3, 2, 2, 1, 1,
1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0
]
},
"spot":
{
"weights":
[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 2, 3, 2, 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
]
},
"matrix":
{
"weights":
[
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
]
}
},
"exposure_modes":
{
"normal":
{
"shutter": [ 100, 20000, 30000, 60000 ],
"gain": [ 1.0, 2.0, 4.0, 8.0 ]
},
"short":
{
"shutter": [ 100, 20000, 30000, 60000 ],
"gain": [ 1.0, 2.0, 4.0, 8.0 ]
},
"long":
{
"shutter": [ 100, 20000, 30000, 60000 ],
"gain": [ 1.0, 2.0, 4.0, 8.0 ]
}
},
"constraint_modes":
{
"normal": [
],
"highlight": [
],
"shadows": [
]
},
"channel_constraints":
[
{
"bound": "UPPER",
"channel": 4,
"factor": 8
},
{
"bound": "LOWER",
"channel": 4,
"factor": 2
}
],
"y_target":
[
0, 0.16,
1000, 0.165,
10000, 0.17
]
},
{
"comment": "Channel 3 is the night mode channel",
"base_ev": 0.33,
"metering_modes":
{
"centre-weighted":
{
"weights":
[
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
1, 1, 2, 2, 3, 3, 4, 4, 4, 3, 3, 2, 2, 1, 1,
1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0
]
},
"spot":
{
"weights":
[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 2, 3, 2, 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
]
},
"matrix":
{
"weights":
[
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
]
}
},
"exposure_modes":
{
"normal":
{
"shutter": [ 100, 20000, 66666 ],
"gain": [ 1.0, 2.0, 4.0 ]
},
"short":
{
"shutter": [ 100, 20000, 33333 ],
"gain": [ 1.0, 2.0, 4.0 ]
},
"long":
{
"shutter": [ 100, 20000, 66666, 120000 ],
"gain": [ 1.0, 2.0, 4.0, 4.0 ]
}
},
"constraint_modes":
{
"normal": [
{
"bound": "LOWER",
"q_lo": 0.98,
"q_hi": 1.0,
"y_target":
[
0, 0.5,
1000, 0.5
]
}
],
"highlight": [
{
"bound": "LOWER",
"q_lo": 0.98,
"q_hi": 1.0,
"y_target":
[
0, 0.5,
1000, 0.5
]
},
{
"bound": "UPPER",
"q_lo": 0.98,
"q_hi": 1.0,
"y_target":
[
0, 0.8,
1000, 0.8
]
}
],
"shadows": [
{
"bound": "LOWER",
"q_lo": 0.98,
"q_hi": 1.0,
"y_target":
[
0, 0.5,
1000, 0.5
]
}
]
},
"y_target":
[
0, 0.16,
1000, 0.16,
10000, 0.17
]
}
]
},
"rpi.alsc": {
'omega': 1.3,
'n_iter': 100,
'luminance_strength': 0.8,
},
"rpi.contrast": {
"ce_enable": 1,
"gamma_curve": [
0, 0,
1024, 5040,
2048, 9338,
3072, 12356,
4096, 15312,
5120, 18051,
6144, 20790,
7168, 23193,
8192, 25744,
9216, 27942,
10240, 30035,
11264, 32005,
12288, 33975,
13312, 35815,
14336, 37600,
15360, 39168,
16384, 40642,
18432, 43379,
20480, 45749,
22528, 47753,
24576, 49621,
26624, 51253,
28672, 52698,
30720, 53796,
32768, 54876,
36864, 57012,
40960, 58656,
45056, 59954,
49152, 61183,
53248, 62355,
57344, 63419,
61440, 64476,
65535, 65535
]
},
"rpi.ccm": {
},
"rpi.cac": {
},
"rpi.sharpen": {
"threshold": 0.25,
"limit": 1.0,
"strength": 1.0
},
"rpi.hdr":
{
"Off":
{
"cadence": [ 0 ]
},
"MultiExposureUnmerged":
{
"cadence": [ 1, 2 ],
"channel_map": { "short": 1, "long": 2 }
},
"SingleExposure":
{
"cadence": [1],
"channel_map": { "short": 1 },
"spatial_gain": 2.0,
"tonemap_enable": 1
},
"MultiExposure":
{
"cadence": [1, 2],
"channel_map": { "short": 1, "long": 2 },
"stitch_enable": 1,
"spatial_gain": 2.0,
"tonemap_enable": 1
},
"Night":
{
"cadence": [ 3 ],
"channel_map": { "night": 3 },
"tonemap_enable": 1,
"tonemap":
[
0, 0,
5000, 20000,
10000, 30000,
20000, 47000,
30000, 55000,
65535, 65535
]
}
}
}
grid_size = (32, 32)
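# Illustrative sanity check (an assumption for demonstration, not part of the
# template itself): the AGC block above defines four channels - normal, HDR
# short, HDR long and night.
#
#   from ctt_pisp import json_template, grid_size
#   assert len(json_template["rpi.agc"]["channels"]) == 4
#   assert grid_size == (32, 32)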

View File

@@ -0,0 +1,130 @@
#!/usr/bin/env python3
#
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright 2022 Raspberry Pi Ltd
#
# Script to pretty print a Raspberry Pi tuning config JSON structure in
# version 2.0 and later formats.
import argparse
import json
import textwrap
class Encoder(json.JSONEncoder):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.indentation_level = 0
self.hard_break = 120
self.custom_elems = {
'weights': 15,
'table': 16,
'luminance_lut': 16,
'ct_curve': 3,
'ccm': 3,
'lut_rx': 9,
'lut_bx': 9,
'lut_by': 9,
'lut_ry': 9,
'gamma_curve': 2,
'y_target': 2,
'prior': 2,
'tonemap': 2
}
def encode(self, o, node_key=None):
if isinstance(o, (list, tuple)):
# Check if we are a flat list of numbers.
if not any(isinstance(el, (list, tuple, dict)) for el in o):
s = ', '.join(json.dumps(el) for el in o)
if node_key in self.custom_elems.keys():
# Special case handling to specify number of elements in a row for tables, ccm, etc.
self.indentation_level += 1
sl = s.split(', ')
num = self.custom_elems[node_key]
chunk = [self.indent_str + ', '.join(sl[x:x + num]) for x in range(0, len(sl), num)]
t = ',\n'.join(chunk)
self.indentation_level -= 1
output = f'\n{self.indent_str}[\n{t}\n{self.indent_str}]'
elif len(s) > self.hard_break - len(self.indent_str):
# Break a long list with wraps.
self.indentation_level += 1
t = textwrap.fill(s, self.hard_break, break_long_words=False,
initial_indent=self.indent_str, subsequent_indent=self.indent_str)
self.indentation_level -= 1
output = f'\n{self.indent_str}[\n{t}\n{self.indent_str}]'
else:
# Smaller lists can remain on a single line.
output = f' [ {s} ]'
return output
else:
# Sub-structures in the list case.
self.indentation_level += 1
output = [self.indent_str + self.encode(el) for el in o]
self.indentation_level -= 1
output = ',\n'.join(output)
return f' [\n{output}\n{self.indent_str}]'
elif isinstance(o, dict):
self.indentation_level += 1
output = []
for k, v in o.items():
if isinstance(v, dict) and len(v) == 0:
# Empty config block special case.
output.append(self.indent_str + f'{json.dumps(k)}: {{ }}')
else:
# Only linebreak if the next node is a config block.
sep = f'\n{self.indent_str}' if isinstance(v, dict) else ''
output.append(self.indent_str + f'{json.dumps(k)}:{sep}{self.encode(v, k)}')
output = ',\n'.join(output)
self.indentation_level -= 1
return f'{{\n{output}\n{self.indent_str}}}'
else:
return ' ' + json.dumps(o)
@property
def indent_str(self) -> str:
return ' ' * self.indentation_level * self.indent
def iterencode(self, o, **kwargs):
return self.encode(o)
def pretty_print(in_json: dict, custom_elems={}) -> str:
if 'version' not in in_json or \
'target' not in in_json or \
'algorithms' not in in_json or \
in_json['version'] < 2.0:
raise RuntimeError('Incompatible JSON dictionary has been provided')
encoder = Encoder(indent=4, sort_keys=False)
encoder.custom_elems |= custom_elems
return encoder.encode(in_json) #json.dumps(in_json, cls=Encoder, indent=4, sort_keys=False)
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter, description=
'Prettify a version 2.0 camera tuning config JSON file.')
parser.add_argument('-t', '--target', type=str, help='Target platform', choices=['pisp', 'vc4'], default='vc4')
parser.add_argument('input', type=str, help='Input tuning file.')
parser.add_argument('output', type=str, nargs='?',
help='Output converted tuning file. If not provided, the input file will be updated in-place.',
default=None)
args = parser.parse_args()
with open(args.input, 'r') as f:
in_json = json.load(f)
if args.target == 'pisp':
from ctt_pisp import grid_size
elif args.target == 'vc4':
from ctt_vc4 import grid_size
out_json = pretty_print(in_json, custom_elems={'table': grid_size[0], 'luminance_lut': grid_size[0]})
with open(args.output if args.output is not None else args.input, 'w') as f:
f.write(out_json)
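# Example invocations (file names are illustrative; the input must be a
# version 2.0 or later tuning file):
#
#   ./ctt_pretty_print_json.py -t pisp tuning.json tuning_pretty.json
#   ./ctt_pretty_print_json.py tuning.json   # rewrites the file in-place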

View File

@@ -0,0 +1,71 @@
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# camera tuning tool RANSAC selector for Macbeth chart locator
import numpy as np
scale = 2
"""
constructs normalised macbeth chart corners for ransac algorithm
"""
def get_square_verts(c_err=0.05, scale=scale):
"""
define macbeth chart corners
"""
b_bord_x, b_bord_y = scale*8.5, scale*13
s_bord = 6*scale
side = 41*scale
x_max = side*6 + 5*s_bord + 2*b_bord_x
y_max = side*4 + 3*s_bord + 2*b_bord_y
c1 = (0, 0)
c2 = (0, y_max)
c3 = (x_max, y_max)
c4 = (x_max, 0)
mac_norm = np.array((c1, c2, c3, c4), np.float32)
mac_norm = np.array([mac_norm])
square_verts = []
square_0 = np.array(((0, 0), (0, side),
(side, side), (side, 0)), np.float32)
offset_0 = np.array((b_bord_x, b_bord_y), np.float32)
c_off = side * c_err
offset_cont = np.array(((c_off, c_off), (c_off, -c_off),
(-c_off, -c_off), (-c_off, c_off)), np.float32)
square_0 += offset_0
square_0 += offset_cont
"""
define macbeth square corners
"""
for i in range(6):
shift_i = np.array(((i*side, 0), (i*side, 0),
(i*side, 0), (i*side, 0)), np.float32)
shift_bord = np.array(((i*s_bord, 0), (i*s_bord, 0),
(i*s_bord, 0), (i*s_bord, 0)), np.float32)
square_i = square_0 + shift_i + shift_bord
for j in range(4):
shift_j = np.array(((0, j*side), (0, j*side),
(0, j*side), (0, j*side)), np.float32)
shift_bord = np.array(((0, j*s_bord),
(0, j*s_bord), (0, j*s_bord),
(0, j*s_bord)), np.float32)
square_j = square_i + shift_j + shift_bord
square_verts.append(square_j)
# print('square_verts')
# print(square_verts)
return np.array(square_verts, np.float32), mac_norm
def get_square_centres(c_err=0.05, scale=scale):
"""
define macbeth square centres
"""
verts, mac_norm = get_square_verts(c_err, scale=scale)
centres = np.mean(verts, axis=1)
# print('centres')
# print(centres)
return np.array(centres, np.float32)

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,150 @@
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# camera tuning tool miscellaneous
import time
import re
import binascii
import os
import cv2
import numpy as np
import imutils
import sys
import matplotlib.pyplot as plt
from sklearn import cluster as cluster
from sklearn.neighbors import NearestCentroid as get_centroids
"""
This file contains some useful tools, the details of which aren't important to
understanding the code. They are collated here to attempt to improve code
readability in the main files.
"""
"""
obtain config values, unless the key doesn't exist, in which case pick the default
Furthermore, it can check if the input is the correct type
"""
def get_config(dictt, key, default, ttype):
try:
val = dictt[key]
if ttype == 'string':
val = str(val)
elif ttype == 'num':
if 'int' not in str(type(val)):
if 'float' not in str(type(val)):
raise ValueError
elif ttype == 'dict':
if not isinstance(val, dict):
raise ValueError
elif ttype == 'list':
if not isinstance(val, list):
raise ValueError
elif ttype == 'bool':
val = int(bool(val))
else:
val = dictt[key]
except (KeyError, ValueError):
val = default
return val
"""
argument parser
"""
def parse_input():
arguments = sys.argv[1:]
if len(arguments) % 2 != 0:
raise ArgError('\n\nERROR! Enter a value for each argument passed.')
params = arguments[0::2]
vals = arguments[1::2]
args_dict = dict(zip(params, vals))
json_output = get_config(args_dict, '-o', None, 'string')
directory = get_config(args_dict, '-i', None, 'string')
config = get_config(args_dict, '-c', None, 'string')
log_path = get_config(args_dict, '-l', None, 'string')
target = get_config(args_dict, '-t', "vc4", 'string')
if directory is None:
raise ArgError('\n\nERROR! No input directory given.')
if json_output is None:
raise ArgError('\n\nERROR! No output json given.')
return json_output, directory, config, log_path, target
"""
custom arg and macbeth error class
"""
class ArgError(Exception):
pass
class MacbethError(Exception):
pass
"""
correlation function to quantify match
"""
def correlate(im1, im2):
f1 = im1.flatten()
f2 = im2.flatten()
cor = np.corrcoef(f1, f2)
return cor[0][1]
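"""
e.g. (illustrative): the score is the Pearson coefficient of the flattened
pixel values, so any image correlates perfectly with itself:
correlate(np.arange(16).reshape(4, 4), np.arange(16).reshape(4, 4)) ~ 1.0
"""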
"""
get list of files from directory
"""
def get_photos(directory='photos'):
filename_list = []
for filename in os.listdir(directory):
if 'jp' in filename or '.dng' in filename:
filename_list.append(filename)
return filename_list
"""
display image for debugging... read at your own risk...
"""
def represent(img, name='image'):
# if type(img) == tuple or type(img) == list:
# for i in range(len(img)):
# name = 'image {}'.format(i)
# cv2.imshow(name, img[i])
# else:
# cv2.imshow(name, img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# return 0
"""
code above displays using opencv, but this doesn't catch users pressing 'x'
with their mouse to close the window, so matplotlib is used instead
(thanks a lot opencv)
"""
grid = plt.GridSpec(22, 1)
plt.subplot(grid[:19, 0])
plt.imshow(img, cmap='gray')
plt.axis('off')
plt.subplot(grid[21, 0])
plt.title('press \'q\' to continue')
plt.axis('off')
plt.show()
# f = plt.figure()
# ax = f.add_subplot(211)
# ax2 = f.add_subplot(122)
# ax.imshow(img, cmap='gray')
# ax.axis('off')
# ax2.set_figheight(2)
# ax2.title('press \'q\' to continue')
# ax2.axis('off')
# plt.show()
"""
reshape image to fixed width without distorting
returns image and scale factor
"""
def reshape(img, width):
factor = width/img.shape[0]
return cv2.resize(img, None, fx=factor, fy=factor), factor
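"""
e.g. (illustrative): for an image with img.shape[0] == 400, reshape(img, 200)
returns the image scaled by factor == 0.5 on both axes; get_macbeth_chart
divides its located coordinates by this factor to map them back to the
original image scale
"""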

View File

@@ -0,0 +1,126 @@
#!/usr/bin/env python3
#
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# ctt_vc4.py - camera tuning tool data for VC4 platforms
json_template = {
"rpi.black_level": {
"black_level": 4096
},
"rpi.dpc": {
},
"rpi.lux": {
"reference_shutter_speed": 10000,
"reference_gain": 1,
"reference_aperture": 1.0
},
"rpi.noise": {
},
"rpi.geq": {
},
"rpi.sdn": {
},
"rpi.awb": {
"priors": [
{"lux": 0, "prior": [2000, 1.0, 3000, 0.0, 13000, 0.0]},
{"lux": 800, "prior": [2000, 0.0, 6000, 2.0, 13000, 2.0]},
{"lux": 1500, "prior": [2000, 0.0, 4000, 1.0, 6000, 6.0, 6500, 7.0, 7000, 1.0, 13000, 1.0]}
],
"modes": {
"auto": {"lo": 2500, "hi": 8000},
"incandescent": {"lo": 2500, "hi": 3000},
"tungsten": {"lo": 3000, "hi": 3500},
"fluorescent": {"lo": 4000, "hi": 4700},
"indoor": {"lo": 3000, "hi": 5000},
"daylight": {"lo": 5500, "hi": 6500},
"cloudy": {"lo": 7000, "hi": 8600}
},
"bayes": 1
},
"rpi.agc": {
"metering_modes": {
"centre-weighted": {
"weights": [3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0]
},
"spot": {
"weights": [2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
},
"matrix": {
"weights": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
}
},
"exposure_modes": {
"normal": {
"shutter": [100, 10000, 30000, 60000, 120000],
"gain": [1.0, 2.0, 4.0, 6.0, 6.0]
},
"short": {
"shutter": [100, 5000, 10000, 20000, 120000],
"gain": [1.0, 2.0, 4.0, 6.0, 6.0]
}
},
"constraint_modes": {
"normal": [
{"bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.5, 1000, 0.5]}
],
"highlight": [
{"bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.5, 1000, 0.5]},
{"bound": "UPPER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.8, 1000, 0.8]}
]
},
"y_target": [0, 0.16, 1000, 0.165, 10000, 0.17]
},
"rpi.alsc": {
'omega': 1.3,
'n_iter': 100,
'luminance_strength': 0.7,
},
"rpi.contrast": {
"ce_enable": 1,
"gamma_curve": [
0, 0,
1024, 5040,
2048, 9338,
3072, 12356,
4096, 15312,
5120, 18051,
6144, 20790,
7168, 23193,
8192, 25744,
9216, 27942,
10240, 30035,
11264, 32005,
12288, 33975,
13312, 35815,
14336, 37600,
15360, 39168,
16384, 40642,
18432, 43379,
20480, 45749,
22528, 47753,
24576, 49621,
26624, 51253,
28672, 52698,
30720, 53796,
32768, 54876,
36864, 57012,
40960, 58656,
45056, 59954,
49152, 61183,
53248, 62355,
57344, 63419,
61440, 64476,
65535, 65535
]
},
"rpi.ccm": {
},
"rpi.sharpen": {
}
}
grid_size = (16, 12)

View File

@@ -0,0 +1,43 @@
"""
Some code that will save virtual macbeth charts that show the difference between optimised and non-optimised matrices
The function creates an image that is 1550 pixels wide by 1050 pixels tall, and fills it with patches which are 200x200 pixels in size
Each patch contains the ideal color, the color from the original matrix, and the color from the final matrix
_________________
| |
| Ideal Color |
|_______________|
| Old | new |
| Color | Color |
|_______|_______|
Nice way of showing how the optimisation helps change the colors and the color matrices
"""
import numpy as np
from PIL import Image
def visualise_macbeth_chart(macbeth_rgb, original_rgb, new_rgb, output_filename):
image = np.zeros((1050, 1550, 3), dtype=np.uint8)
colorindex = -1
for y in range(6):
for x in range(4): # Creates 6 x 4 grid of macbeth chart
colorindex += 1
xlocation = 50 + 250 * x # Means there is 50px of black gap between each square, more like the real macbeth chart.
ylocation = 50 + 250 * y
for g in range(200):
for i in range(100):
image[xlocation + i, ylocation + g] = macbeth_rgb[colorindex]
xlocation = 150 + 250 * x
ylocation = 50 + 250 * y
for i in range(100):
for g in range(100):
image[xlocation + i, ylocation + g] = original_rgb[colorindex] # Smaller squares below to compare the old colors with the new ones
xlocation = 150 + 250 * x
ylocation = 150 + 250 * y
for i in range(100):
for g in range(100):
image[xlocation + i, ylocation + g] = new_rgb[colorindex]
img = Image.fromarray(image, 'RGB')
img.save(str(output_filename) + 'Generated Macbeth Chart.png')
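# A minimal usage sketch (names and values are illustrative assumptions):
# each argument is a list of 24 [R, G, B] triples, one per patch of the
# 6 x 4 chart.
if __name__ == '__main__':
    demo = [list(np.random.randint(0, 256, 3)) for _ in range(24)]
    visualise_macbeth_chart(demo, demo, demo, 'demo_')
    # writes 'demo_Generated Macbeth Chart.png'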