mirror of
https://github.com/opencv/opencv.git
synced 2026-01-18 17:21:42 +01:00
Merge pull request #27491 from MykhailoTrushch:ca_cpp
Chromatic aberration correction #27491

Merge with https://github.com/opencv/opencv_extra/pull/1266

### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on code under GPL or another license that is incompatible with OpenCV
- [x] The PR is proposed to the proper branch
- [x] There is a reference to the original bug report and related work
- [x] There are accuracy tests, performance tests and test data in the opencv_extra repository, if applicable. The patch to opencv_extra has the same branch name.
- [x] The feature is well documented and sample code can be built with the project CMake

Parent issue: https://github.com/opencv/opencv/issues/27206
Related PR: [#27490](https://github.com/opencv/opencv/pull/27490)

This PR adds chromatic aberration correction in C++ based on calibration data produced by the Python app. It adds a function for chromatic aberration correction driven by a calibration file (`Mat correctChromaticAberration(InputArray image, const String& calibration_file)`), and a class `ChromaticAberrationCorrector` that can be reused to correct images of the same camera under the same settings (i.e. with the same calibration data initialized once up front). Basic functionality and performance tests are also added.
apps/chromatic-aberration-calibration/chromatic_calibration.py (new file, 741 lines)
@@ -0,0 +1,741 @@
|
||||
# This file is part of OpenCV project.
|
||||
# It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
# of this distribution and at http://opencv.org/license.html.
|
||||
|
||||
'''
|
||||
Camera calibration for chromatic aberration correction
|
||||
The calibration is done of a photo of black discs on white background.
|
||||
The calibration pattern can be found either in
|
||||
opencv_extra/testdata/cv/cameracalibration/chromatic_aberration/chromatic_aberration_pattern_a3.png,
|
||||
or can be replicated using the script for generating patterns:
|
||||
https://github.com/opencv/opencv/blob/4.x/doc/pattern_tools/gen_pattern.py,
|
||||
using the following invocation:
|
||||
|
||||
python doc/pattern_tools/gen_pattern.py \
|
||||
--output fc4_pattern_A3.svg \
|
||||
--type circles \
|
||||
--rows 26 --columns 37 \
|
||||
--units mm \
|
||||
--square_size 11 \
|
||||
--radius_rate 2.75 \
|
||||
--page_width 420 --page_height 297
|
||||
|
||||
And then converted to PNG:
|
||||
|
||||
inkscape fc4_pattern_A3.svg --export-type=png --export-dpi=300 \
|
||||
--export-background=white --export-background-opacity=1 \
|
||||
--export-filename=fc4_pattern_A3.png
|
||||
|
||||
Calibration image is split into b,g,r, and g is used as reference channel.
|
||||
The centres of each circle in red and blue channels are found as centres of ellipses
|
||||
and then calculated on a subpixel level. Each centre in red or blue channel is paired to
|
||||
a respective centre in green channel. Then, a polynomial model of degree 11 is fit onto the image,
|
||||
minimizing the difference between the displacements between centres in green and red/blue
|
||||
and the actual delta computed with polynomial coefficients. The coefficients are then saved in yaml
|
||||
format and can be used in this sample to correct images of the same camera, lens and settings.
|
||||
|
||||
usage:
|
||||
chromatic_calibration.py calibrate [-h] [--degree DEGREE] --coeffs_file YAML_FILE_PATH image [image ...]
|
||||
chromatic_calibration.py correct [-h] --coeffs_file YAML_FILE_PATH [-o OUTPUT] image
|
||||
chromatic_calibration.py full [-h] [--degree DEGREE] --coeffs_file YAML_FILE_PATH [-o OUTPUT] image
|
||||
|
||||
usage example:
|
||||
chromatic_calibration.py calibrate pattern_aberrated.png --coeffs_file calib_result.yaml
|
||||
|
||||
default values:
|
||||
--degree: 11
|
||||
-o, --output: corrected.png
|
||||
'''
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import math
|
||||
import pathlib
|
||||
from dataclasses import dataclass
|
||||
from typing import Any
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
import yaml
|
||||
from scipy.optimize import minimize
|
||||
from scipy.spatial import cKDTree
|
||||
|
||||
|
||||
@dataclass
|
||||
class Polynomial2D:
|
||||
coeffs_x: np.ndarray
|
||||
coeffs_y: np.ndarray
|
||||
degree: int
|
||||
height: int
|
||||
width: int
|
||||
|
||||
def delta(self, x: np.ndarray, y: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
|
||||
mean_x, mean_y = self.width * 0.5, self.height * 0.5
|
||||
inv_std_x, inv_std_y = 1.0 / mean_x, 1.0 / mean_y
|
||||
x_n = (x - mean_x) * inv_std_x
|
||||
y_n = (y - mean_y) * inv_std_y
|
||||
terms = monomial_terms(x_n, y_n, self.degree)
|
||||
dx = terms @ self.coeffs_x
|
||||
dy = terms @ self.coeffs_y
|
||||
return dx.reshape(x.shape), dy.reshape(y.shape)
|
||||
|
||||
|
||||
|
||||
def validate_calibration_dict(data: dict) -> tuple[int, int, int]:
|
||||
required_keys = {
|
||||
"red_channel", "blue_channel", "image_width", "image_height"
|
||||
}
|
||||
missing = required_keys - data.keys()
|
||||
if missing:
|
||||
raise ValueError(f"Missing keys in YAML: {', '.join(missing)}")
|
||||
|
||||
width = int(data["image_width"])
|
||||
height = int(data["image_height"])
|
||||
if width <= 0 or height <= 0:
|
||||
raise ValueError("Image width and height must be positive integers")
|
||||
|
||||
def _get_coeffs(channel: str, axis: str) -> np.ndarray:
|
||||
try:
|
||||
coeffs = np.asarray(data[channel][f"coeffs_{axis}"], dtype=float)
|
||||
except KeyError as e:
|
||||
raise ValueError(f"Missing {axis} coefficients for {channel}") from e
|
||||
if coeffs.ndim != 1:
|
||||
raise ValueError(f"{channel} {axis} coefficients must be a 1‑D list/array")
|
||||
if not np.all(np.isfinite(coeffs)):
|
||||
raise ValueError(f"{channel} {axis} coefficients contain NaN or Inf")
|
||||
return coeffs
|
||||
|
||||
rx = _get_coeffs("red_channel", "x")
|
||||
ry = _get_coeffs("red_channel", "y")
|
||||
bx = _get_coeffs("blue_channel", "x")
|
||||
by = _get_coeffs("blue_channel", "y")
|
||||
|
||||
for channel in ["red_channel", "blue_channel"]:
|
||||
try:
|
||||
rms = data[channel]["rms"]
|
||||
except KeyError as e:
|
||||
raise ValueError(f"Missing rms for {channel}") from e
|
||||
|
||||
for name, cx, cy in [("red", rx, ry), ("blue", bx, by)]:
|
||||
if cx.size != cy.size:
|
||||
raise ValueError(
|
||||
f"{name} channel: coeffs_x ({cx.size}) and coeffs_y "
|
||||
f"({cy.size}) lengths differ"
|
||||
)
|
||||
|
||||
if rx.size != bx.size:
|
||||
raise ValueError(
|
||||
f"Red and blue channels use different polynomial sizes "
|
||||
f"({rx.size} vs {bx.size})"
|
||||
)
|
||||
|
||||
m = rx.size
|
||||
n_float = (math.sqrt(1 + 8*m) - 3) / 2
|
||||
degree = int(round(n_float))
|
||||
expected_m = (degree + 1) * (degree + 2) // 2
|
||||
if expected_m != m:
|
||||
raise ValueError(
|
||||
f"Coefficient count {m} is not triangular (n != (deg+1)*(deg+2)/2); "
|
||||
f"nearest degree would be {degree} (needs {expected_m})"
|
||||
)
|
||||
|
||||
return degree, height, width
|
||||
|
||||
|
||||
def load_calib_result(path: str | None = None) -> dict[str, Any]:
|
||||
path = pathlib.Path(path)
|
||||
with path.open("r") as fh:
|
||||
if path.suffix.lower() in {".yaml", ".yml"}:
|
||||
data = yaml.safe_load(fh)
|
||||
else:
|
||||
raise ValueError("YAML file expected as input for the calibration result")
|
||||
|
||||
deg, height, width = validate_calibration_dict(data)
|
||||
|
||||
red_data = data["red_channel"]
|
||||
blue_data = data["blue_channel"]
|
||||
|
||||
poly_r = Polynomial2D(
|
||||
np.asarray(red_data["coeffs_x"]),
|
||||
np.asarray(red_data["coeffs_y"]),
|
||||
deg,
|
||||
height,
|
||||
width
|
||||
)
|
||||
poly_b = Polynomial2D(
|
||||
np.asarray(blue_data["coeffs_x"]),
|
||||
np.asarray(blue_data["coeffs_y"]),
|
||||
deg,
|
||||
height,
|
||||
width
|
||||
)
|
||||
|
||||
return {
|
||||
"poly_red": poly_r,
|
||||
"poly_blue": poly_b,
|
||||
"image_height": height,
|
||||
"image_width": width,
|
||||
}
|
||||
|
||||
|
||||
def repr_flow_seq(dumper, data):
|
||||
return dumper.represent_sequence('tag:yaml.org,2002:seq',
|
||||
data,
|
||||
flow_style=True)
|
||||
|
||||
|
||||
yaml.SafeDumper.add_representer(list, repr_flow_seq)
|
||||
|
||||
|
||||
def save_calib_result(calib, path: str | None = None) -> None:
|
||||
d = {
"blue_channel": {
"coeffs_x": calib["poly_blue"].coeffs_x.tolist(),
"coeffs_y": calib["poly_blue"].coeffs_y.tolist(),
"rms": calib["rms_blue"]
},
"red_channel": {
"coeffs_x": calib["poly_red"].coeffs_x.tolist(),
"coeffs_y": calib["poly_red"].coeffs_y.tolist(),
"rms": calib["rms_red"]
},
"image_width": calib["image_width"],
"image_height": calib["image_height"]
}
|
||||
if path is not None:
|
||||
with open(path, "w") as fh:
|
||||
yaml.safe_dump(d,
|
||||
fh,
|
||||
version=(1, 2),
|
||||
default_flow_style=False,
|
||||
sort_keys=False)
|
||||
|
||||
|
||||
def monomial_terms(x: np.ndarray, y: np.ndarray, degree: int) -> np.ndarray:
|
||||
x = x.flatten()
|
||||
y = y.flatten()
|
||||
terms = []
|
||||
cnt = 0
|
||||
for total in range(degree + 1):
|
||||
for i in range(total + 1):
|
||||
j = total - i
|
||||
terms.append((x ** i) * (y ** j))
|
||||
cnt += 1
|
||||
return np.vstack(terms).T
|
||||
|
||||
|
||||
def detect_disk_centres(
|
||||
img: np.ndarray,
|
||||
*,
|
||||
min_area: int = 20,
|
||||
max_area: int | None = None,
|
||||
circularity_thresh: float = 0.7,
|
||||
morph_kernel: int = 3,
|
||||
) -> np.ndarray:
|
||||
if img.ndim != 2:
|
||||
raise ValueError("detect_disk_centres expects a grayscale image")
|
||||
blur = cv2.GaussianBlur(img, (5, 5), 0)
|
||||
_, mask = cv2.threshold(
|
||||
blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU
|
||||
)
|
||||
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (morph_kernel,) * 2)
|
||||
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=1)
|
||||
cnts, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
|
||||
|
||||
centres = []
|
||||
|
||||
for c in cnts:
|
||||
if len(c) < 5:
|
||||
continue
|
||||
area = cv2.contourArea(c)
|
||||
if area < min_area:
|
||||
continue
|
||||
if max_area is not None and area > max_area:
|
||||
continue
|
||||
|
||||
peri = cv2.arcLength(c, closed=True)
|
||||
circularity = 4 * np.pi * area / (peri * peri + 1e-12)
|
||||
if circularity < circularity_thresh:
|
||||
continue
|
||||
(cx, cy), (a, b), theta = cv2.fitEllipse(c)
|
||||
|
||||
eps = 1e-6
|
||||
pts = c.reshape(-1, 2).astype(np.float64)
|
||||
ct, st = np.cos(np.radians(theta)), np.sin(np.radians(theta))
|
||||
r = np.array([[ct, st], [-st, ct]])
|
||||
|
||||
# translate points so that they are centered around mean, and rotate them
|
||||
p = (r @ (pts.T - np.array([[cx], [cy]]))).T
|
||||
# ellipse equation
|
||||
f = (p[:, 0] / (a / 2 + eps)) ** 2 + (p[:, 1] / (b / 2 + eps)) ** 2 - 1
|
||||
# gradients of ellipse equation
|
||||
j = np.column_stack(
|
||||
[2 * p[:, 0] / ((a / 2 + eps) ** 2), 2 * p[:, 1] / ((b / 2 + eps) ** 2)]
|
||||
)
|
||||
|
||||
# solve least squares to get delta of centers
|
||||
delta, *_ = np.linalg.lstsq(j, -f, rcond=None)
|
||||
cx -= delta[0]
|
||||
cy -= delta[1]
|
||||
centres.append((cx, cy))
|
||||
|
||||
if len(centres) == 0:
|
||||
raise RuntimeError("No valid disks detected, check function parameters")
|
||||
|
||||
return np.asarray(centres, dtype=np.float32)
|
||||
|
||||
|
||||
def pair_keypoints(
|
||||
ref: np.ndarray,
|
||||
target: np.ndarray,
|
||||
max_error: float = 30.0,
|
||||
) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
|
||||
tree = cKDTree(ref)
|
||||
dists, idx = tree.query(target, distance_upper_bound=max_error)
|
||||
mask = np.isfinite(dists)
|
||||
if not np.any(mask):
|
||||
raise RuntimeError("No valid keypoint matches were created")
|
||||
target_valid = target[mask]
|
||||
ref_valid = ref[idx[mask]]
|
||||
disp = ref_valid - target_valid
|
||||
return target_valid[:, 0], target_valid[:, 1], disp
|
||||
|
||||
|
||||
def fit_channel(
|
||||
x: np.ndarray,
|
||||
y: np.ndarray,
|
||||
disp: np.ndarray,
|
||||
degree: int,
|
||||
height: int,
|
||||
width: int,
|
||||
method: str = "L-BFGS-B",
|
||||
) -> tuple[np.ndarray, np.ndarray, float]:
|
||||
mean_x, mean_y = width * 0.5, height * 0.5
|
||||
inv_std_x, inv_std_y = 1.0 / mean_x, 1.0 / mean_y
|
||||
x = (x - mean_x) * inv_std_x
|
||||
y = (y - mean_y) * inv_std_y
|
||||
|
||||
terms = monomial_terms(x, y, degree)
|
||||
m = terms.shape[1]
|
||||
|
||||
def objective(c: np.ndarray) -> float:
|
||||
cx = c[:m]
|
||||
cy = c[m:]
|
||||
pred_x = terms @ cx
|
||||
pred_y = terms @ cy
|
||||
err = np.hstack([pred_x - disp[:, 0], pred_y - disp[:, 1]])
|
||||
if np.any(np.isnan(err)) or np.any(np.isinf(err)):
|
||||
return 1e12
|
||||
return np.sum(err ** 2)
|
||||
|
||||
cx_ls, *_ = np.linalg.lstsq(terms, disp[:, 0], rcond=None)
|
||||
cy_ls, *_ = np.linalg.lstsq(terms, disp[:, 1], rcond=None)
|
||||
c0 = np.hstack([cx_ls, cy_ls])
|
||||
|
||||
res = minimize(objective, c0, method=method, options={
|
||||
"maxiter": 500,
|
||||
"maxfun": 5000,
|
||||
"maxls": 50,
|
||||
"ftol": 1e-9,
|
||||
})
|
||||
|
||||
coeffs_x = res.x[:m]
|
||||
coeffs_y = res.x[m:]
|
||||
rms = math.sqrt(res.fun / disp.shape[0])
|
||||
return coeffs_x, coeffs_y, rms
|
||||
|
||||
|
||||
def fit_polynomials(
|
||||
x_r: np.ndarray,
|
||||
y_r: np.ndarray,
|
||||
disp_r: np.ndarray,
|
||||
x_b: np.ndarray,
|
||||
y_b: np.ndarray,
|
||||
disp_b: np.ndarray,
|
||||
degree: int,
|
||||
height: int,
|
||||
width: int
|
||||
) -> tuple[Polynomial2D, Polynomial2D, float, float]:
|
||||
crx, cry, rms_r = fit_channel(x_r, y_r, disp_r, degree, height, width)
|
||||
cbx, cby, rms_b = fit_channel(x_b, y_b, disp_b, degree, height, width)
|
||||
poly_r = Polynomial2D(crx, cry, degree, height, width)
|
||||
poly_b = Polynomial2D(cbx, cby, degree, height, width)
|
||||
return poly_r, poly_b, rms_r, rms_b
|
||||
|
||||
def calibrate(
|
||||
imgs: list[np.ndarray],
|
||||
degree: int = 11,
|
||||
):
|
||||
xr_all, yr_all, dr_all = [], [], []
|
||||
xb_all, yb_all, db_all = [], [], []
|
||||
h0, w0 = None, None
|
||||
|
||||
for i, img in enumerate(imgs):
|
||||
if img is None or img.ndim != 3 or img.shape[2] != 3:
|
||||
raise ValueError("Expected a BGR color image")
|
||||
|
||||
h, w = img.shape[:2]
|
||||
b, g, r = cv2.split(img)
|
||||
|
||||
pts_g = detect_disk_centres(g)
|
||||
pts_r = detect_disk_centres(r)
|
||||
pts_b = detect_disk_centres(b)
|
||||
|
||||
xr, yr, disp_r = pair_keypoints(pts_g, pts_r)
|
||||
xb, yb, disp_b = pair_keypoints(pts_g, pts_b)
|
||||
if h0 is None:
|
||||
h0, w0 = h, w
|
||||
else:
|
||||
if (h, w) != (h0, w0):
|
||||
raise ValueError(
|
||||
f"All calibration images must have the same resolution; "
|
||||
f"got {(h,w)} vs {(h0,w0)} at image #{i}"
|
||||
)
|
||||
|
||||
xr_all.append(xr)
|
||||
yr_all.append(yr)
|
||||
dr_all.append(disp_r)
|
||||
xb_all.append(xb)
|
||||
yb_all.append(yb)
|
||||
db_all.append(disp_b)
|
||||
|
||||
xr = np.concatenate(xr_all, axis=0)
|
||||
yr = np.concatenate(yr_all, axis=0)
|
||||
disp_r = np.concatenate(dr_all, axis=0)
|
||||
|
||||
xb = np.concatenate(xb_all, axis=0)
|
||||
yb = np.concatenate(yb_all, axis=0)
|
||||
disp_b = np.concatenate(db_all, axis=0)
|
||||
|
||||
poly_r, poly_b, rms_r, rms_b = fit_polynomials(
|
||||
xr, yr, disp_r,
|
||||
xb, yb, disp_b,
|
||||
degree, h0, w0
|
||||
)
|
||||
|
||||
print(f"Calibrated polynomial with degree {degree} on {len(imgs)} images, "
|
||||
f"RMS red: {rms_r:.3f} px; RMS blue: {rms_b:.3f} px")
|
||||
|
||||
return {
|
||||
"poly_red": poly_r,
|
||||
"poly_blue": poly_b,
|
||||
"image_width": w0,
|
||||
"image_height": h0,
|
||||
"rms_red": rms_r,
|
||||
"rms_blue": rms_b,
|
||||
}
|
||||
|
||||
def calibrate_multi_degree(
|
||||
imgs: list[np.ndarray],
|
||||
k0: int,
|
||||
k1: int,
|
||||
) -> dict[int, tuple[Polynomial2D, Polynomial2D, float, float]]:
|
||||
"""
|
||||
Returns a dict mapping degree → (poly_r, poly_b, rms_r, rms_b).
|
||||
"""
|
||||
xr_all, yr_all, dr_all = [], [], []
|
||||
xb_all, yb_all, db_all = [], [], []
|
||||
h0, w0 = None, None
|
||||
|
||||
for i, img in enumerate(imgs):
|
||||
if img is None or img.ndim != 3 or img.shape[2] != 3:
|
||||
raise ValueError("Expected a BGR color image")
|
||||
|
||||
h, w = img.shape[:2]
|
||||
b, g, r = cv2.split(img)
|
||||
|
||||
pts_g = detect_disk_centres(g)
|
||||
pts_r = detect_disk_centres(r)
|
||||
pts_b = detect_disk_centres(b)
|
||||
|
||||
xr, yr, disp_r = pair_keypoints(pts_g, pts_r)
|
||||
xb, yb, disp_b = pair_keypoints(pts_g, pts_b)
|
||||
if h0 is None:
|
||||
h0, w0 = h, w
|
||||
else:
|
||||
if (h, w) != (h0, w0):
|
||||
raise ValueError(
|
||||
f"All calibration images must have the same resolution; "
|
||||
f"got {(h,w)} vs {(h0,w0)} at image #{i}"
|
||||
)
|
||||
|
||||
xr_all.append(xr)
|
||||
yr_all.append(yr)
|
||||
dr_all.append(disp_r)
|
||||
xb_all.append(xb)
|
||||
yb_all.append(yb)
|
||||
db_all.append(disp_b)
|
||||
|
||||
xr = np.concatenate(xr_all, axis=0)
|
||||
yr = np.concatenate(yr_all, axis=0)
|
||||
disp_r = np.concatenate(dr_all, axis=0)
|
||||
|
||||
xb = np.concatenate(xb_all, axis=0)
|
||||
yb = np.concatenate(yb_all, axis=0)
|
||||
disp_b = np.concatenate(db_all, axis=0)
|
||||
|
||||
results = {}
|
||||
for deg in range(k0, k1+1):
|
||||
print(deg)
|
||||
|
||||
poly_r, poly_b, rms_r, rms_b = fit_polynomials(
|
||||
xr,
|
||||
yr,
|
||||
disp_r,
|
||||
xb,
|
||||
yb,
|
||||
disp_b,
|
||||
deg,
|
||||
h0,
|
||||
w0
|
||||
)
|
||||
print(f"Calibrated polynomial with degree {deg}, RMS red: {rms_r:.3f} px; RMS blue: {rms_b:.3f} px")
|
||||
results[deg] = (poly_r, poly_b, rms_r, rms_b)
|
||||
return results
|
||||
|
||||
|
||||
def build_remap(
|
||||
h: int,
|
||||
w: int,
|
||||
poly: Polynomial2D,
|
||||
) -> tuple[np.ndarray, np.ndarray]:
|
||||
x, y = np.meshgrid(np.arange(w, dtype=np.float32), np.arange(h, dtype=np.float32))
|
||||
dx, dy = poly.delta(x, y)
|
||||
map_x = (x - dx).astype(np.float32)
|
||||
map_y = (y - dy).astype(np.float32)
|
||||
return map_x, map_y
|
||||
|
||||
|
||||
def correct_image(
|
||||
img: np.ndarray,
|
||||
calib: dict[str, Any],
|
||||
) -> np.ndarray:
|
||||
if img.ndim != 3 or img.shape[2] != 3:
|
||||
raise ValueError("correct_image expects a BGR colour image")
|
||||
|
||||
h, w = img.shape[:2]
|
||||
b, g, r = cv2.split(img)
|
||||
map_x_r, map_y_r = build_remap(h, w, calib["poly_red"])
|
||||
map_x_b, map_y_b = build_remap(h, w, calib["poly_blue"])
|
||||
|
||||
r_corr = cv2.remap(r, map_x_r, map_y_r, cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)
|
||||
b_corr = cv2.remap(b, map_x_b, map_y_b, cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)
|
||||
|
||||
map_x_g, map_y_g = np.meshgrid(
|
||||
np.arange(w, dtype=np.float32),
|
||||
np.arange(h, dtype=np.float32)
|
||||
)
|
||||
|
||||
g_corr = cv2.remap(g, map_x_g, map_y_g,
|
||||
cv2.INTER_LINEAR,
|
||||
borderMode=cv2.BORDER_REPLICATE)
|
||||
|
||||
corrected = cv2.merge((b_corr, g_corr, r_corr))
|
||||
return corrected
|
||||
|
||||
def detect_disk_contours(
|
||||
img: np.ndarray,
|
||||
*,
|
||||
min_area: int = 20,
|
||||
max_area: int | None = None,
|
||||
circularity_thresh: float = 0.7,
|
||||
morph_kernel: int = 3,
|
||||
) -> list[np.ndarray]:
|
||||
"""
|
||||
Find all external contours of “discs” in a binary mask of `img` and return
|
||||
their raw point coordinates as a list of (N_i,2) float32 arrays.
|
||||
"""
|
||||
if img.ndim != 2:
|
||||
raise ValueError("detect_disk_contours expects a grayscale image")
|
||||
blur = cv2.GaussianBlur(img, (5, 5), 0)
|
||||
_, mask = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
|
||||
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (morph_kernel,)*2)
|
||||
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=1)
|
||||
|
||||
cnts, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
|
||||
contours = []
|
||||
for c in cnts:
|
||||
if len(c) < 5:
|
||||
continue
|
||||
area = cv2.contourArea(c)
|
||||
if area < min_area or (max_area is not None and area > max_area):
|
||||
continue
|
||||
peri = cv2.arcLength(c, True)
|
||||
circ = 4 * math.pi * area / (peri*peri + 1e-12)
|
||||
if circ < circularity_thresh:
|
||||
continue
|
||||
pts = c.reshape(-1, 2).astype(np.float32)
|
||||
contours.append(pts)
|
||||
if not contours:
|
||||
raise RuntimeError("No valid disk contours found")
|
||||
return contours
|
||||
|
||||
def warp_and_compare(contours_src: list[np.ndarray],
|
||||
poly_src: Polynomial2D,
|
||||
pts_ref: np.ndarray) -> np.ndarray:
|
||||
"""
|
||||
Warp src-channel contours through poly_src.delta,
|
||||
then compute for each warped point its distance to the nearest
|
||||
green contour point in pts_ref.
|
||||
"""
|
||||
pts = np.vstack(contours_src)
|
||||
xs, ys = pts[:,0], pts[:,1]
|
||||
dx, dy = poly_src.delta(xs, ys)
|
||||
warped = np.column_stack([xs - dx, ys - dy])
|
||||
|
||||
tree = cKDTree(pts_ref)
|
||||
dists, _ = tree.query(warped, k=1)
|
||||
return dists
|
||||
|
||||
|
||||
def parse_args() -> argparse.Namespace:
|
||||
p = argparse.ArgumentParser(
|
||||
description="Chromatic aberration calibration and correction tool",
|
||||
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
|
||||
)
|
||||
sub = p.add_subparsers(dest="cmd", required=True)
|
||||
|
||||
sc = sub.add_parser("calibrate", help="Calibrate from calibration target image")
|
||||
sc.add_argument("image", nargs="+", help="One or more images of black‑disk calibration target")
|
||||
sc.add_argument("--degree", type=int, default=11, help="Polynomial degree")
|
||||
sc.add_argument("--coeffs_file", required=True, help="Save coefficients to YAML file")
|
||||
|
||||
sr = sub.add_parser("correct", help="Correct a photograph using saved coefficients")
|
||||
sr.add_argument("image", help="Input image to be corrected")
|
||||
sr.add_argument("--coeffs_file", required=True,
|
||||
help="Calibration coefficient file (.json/.yaml)")
|
||||
sr.add_argument("-o", "--output", default="corrected.png", help="Output filename")
|
||||
|
||||
sf = sub.add_parser("full",help="Calibrate from calibration target image and \
|
||||
correct the calibration target")
|
||||
sf.add_argument("image", nargs="+", help="One or more images of black‑disk calibration target")
|
||||
sf.add_argument("--degree", type=int, default=11, help="Polynomial degree")
|
||||
sf.add_argument("--coeffs_file", required=True, help="Save coefficients to YAML file")
|
||||
sf.add_argument("-o", "--output", default="corrected.png", help="Output filename")
|
||||
|
||||
ss = sub.add_parser("scan", help="Sweep degree range and report errors")
|
||||
ss.add_argument("image", nargs="+", help="Calibration image path")
|
||||
ss.add_argument("--degree_range", nargs=2, type=int, metavar=("k0","k1"),
|
||||
required=True, help="Inclusive degree range to scan")
|
||||
ss.add_argument("--method", default="POWELL", help="Optimizer method")
|
||||
|
||||
return p.parse_args()
|
||||
|
||||
|
||||
def cmd_calibrate(parsed_args: argparse.Namespace) -> None:
|
||||
paths = parsed_args.image if isinstance(parsed_args.image, list) else [parsed_args.image]
|
||||
imgs = []
|
||||
for p in paths:
|
||||
im = cv2.imread(p, cv2.IMREAD_COLOR)
|
||||
if im is None:
|
||||
raise FileNotFoundError(p)
|
||||
imgs.append(im)
|
||||
|
||||
calib = calibrate(imgs, degree=parsed_args.degree)
|
||||
save_calib_result(calib, path=parsed_args.coeffs_file)
|
||||
print("Saved coefficients to", parsed_args.coeffs_file)
|
||||
|
||||
|
||||
def cmd_correct(parsed_args: argparse.Namespace) -> None:
|
||||
path = parsed_args.image
|
||||
|
||||
fs = cv2.FileStorage(parsed_args.coeffs_file, cv2.FileStorage_READ)
|
||||
if not fs.isOpened():
|
||||
print(f"Could not load calibration coefficients from {parsed_args.coeffs_file}")
|
||||
return
|
||||
coeff_mat, calib_size, degree = cv2.loadChromaticAberrationParams(fs.root())
|
||||
|
||||
img = cv2.imread(path, cv2.IMREAD_COLOR)
|
||||
if img is None:
|
||||
print(f"Could not read image {path}")
|
||||
return
|
||||
|
||||
fixed = cv2.correctChromaticAberration(img, coeff_mat, calib_size, degree)
|
||||
|
||||
cv2.imwrite(parsed_args.output, fixed)
|
||||
print(f"Corrected image written to {parsed_args.output}")
|
||||
|
||||
|
||||
def cmd_full(parsed_args: argparse.Namespace) -> None:
|
||||
paths = parsed_args.image if isinstance(parsed_args.image, list) else [parsed_args.image]
|
||||
imgs = []
|
||||
for p in paths:
|
||||
im = cv2.imread(p, cv2.IMREAD_COLOR)
|
||||
if im is None:
|
||||
raise FileNotFoundError(p)
|
||||
imgs.append(im)
|
||||
|
||||
calib = calibrate(imgs, degree=parsed_args.degree)
|
||||
img_for_correction = imgs[0]
|
||||
save_calib_result(calib, path=parsed_args.coeffs_file)
|
||||
print("Saved coefficients to", parsed_args.coeffs_file)
|
||||
|
||||
fs = cv2.FileStorage(parsed_args.coeffs_file, cv2.FileStorage_READ)
|
||||
if not fs.isOpened():
|
||||
print(f"Could not load calibration coefficients from {parsed_args.coeffs_file}")
|
||||
return
|
||||
coeff_mat, calib_size, degree = cv2.loadChromaticAberrationParams(fs.root())
|
||||
|
||||
fixed = cv2.correctChromaticAberration(img_for_correction, coeff_mat, calib_size, degree)
|
||||
cv2.imwrite(parsed_args.output, fixed)
|
||||
print(f"Corrected image written to {parsed_args.output}")
|
||||
|
||||
|
||||
def cmd_scan(parsed_args: argparse.Namespace) -> None:
|
||||
paths = parsed_args.image if isinstance(parsed_args.image, list) else [parsed_args.image]
|
||||
imgs = []
|
||||
for p in paths:
|
||||
im = cv2.imread(p, cv2.IMREAD_COLOR)
|
||||
if im is None:
|
||||
raise FileNotFoundError(p)
|
||||
imgs.append(im)
|
||||
|
||||
k0, k1 = parsed_args.degree_range
|
||||
results = calibrate_multi_degree(imgs, k0, k1)
|
||||
|
||||
all_contours_b = []
|
||||
all_contours_g = []
|
||||
all_contours_r = []
|
||||
|
||||
for img in imgs:
|
||||
b, g, r = cv2.split(img)
|
||||
all_contours_b.extend(detect_disk_contours(b))
|
||||
all_contours_g.extend(detect_disk_contours(g))
|
||||
all_contours_r.extend(detect_disk_contours(r))
|
||||
|
||||
pts_g = np.vstack(all_contours_g)
|
||||
|
||||
print(f"Reference degree: {k1}\n")
|
||||
header = "deg | max_r mean_r std_r | max_b mean_b std_b"
|
||||
print(header)
|
||||
print("-" * len(header))
|
||||
|
||||
for deg in sorted(results):
|
||||
if deg == k1:
|
||||
continue
|
||||
pr, pb, _, _ = results[deg]
|
||||
|
||||
d_r = warp_and_compare(all_contours_r, pr, pts_g)
|
||||
d_b = warp_and_compare(all_contours_b, pb, pts_g)
|
||||
|
||||
s = {
|
||||
'max_r': d_r.max(), 'mean_r': d_r.mean(), 'std_r': d_r.std(),
|
||||
'max_b': d_b.max(), 'mean_b': d_b.mean(), 'std_b': d_b.std()
|
||||
}
|
||||
|
||||
print(f"{deg:3d} | "
|
||||
f"{s['max_r']:8.3f} {s['mean_r']:8.3f} {s['std_r']:8.3f} | "
|
||||
f"{s['max_b']:8.3f} {s['mean_b']:8.3f} {s['std_b']:8.3f}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
args = parse_args()
|
||||
if args.cmd == "calibrate":
|
||||
cmd_calibrate(args)
|
||||
elif args.cmd == "correct":
|
||||
cmd_correct(args)
|
||||
elif args.cmd == "full":
|
||||
cmd_full(args)
|
||||
elif args.cmd == "scan":
|
||||
cmd_scan(args)
|
||||
@@ -1625,3 +1625,12 @@
  volume = {7},
  url = {https://doi.org/10.1007/BF01898354}
}

@inproceedings{rudakova2013precise,
  title = {Precise correction of lateral chromatic aberration in images},
  author = {Rudakova, Victoria and Monasse, Pascal},
  booktitle = {Pacific-Rim Symposium on Image and Video Technology},
  pages = {12--22},
  year = {2013},
  url = {https://enpc.hal.science/hal-00858703/document}
}

BIN doc/py_tutorials/py_photo/py_chromatic_aberration/images/ca1.png (new binary file, 108 KiB)
BIN doc/py_tutorials/py_photo/py_chromatic_aberration/images/ca2.png (new binary file, 176 KiB)
BIN (new binary file, path not shown, 19 KiB)
@@ -0,0 +1,100 @@
Chromatic Aberration Correction {#tutorial_py_chromatic_aberration}
================

Goal
----

In this chapter, we will learn how to

- Calibrate your camera and get the coefficients to correct lateral chromatic aberration.
- Export these coefficients that model the red/blue channel misalignments.
- Correct images using functions in OpenCV.

Basics
------

Lateral chromatic aberration occurs when different wavelengths focus at slightly different image positions. This results in red/blue fringes at high-contrast edges, and is particularly common in older or lower-quality cameras and lenses. It is a property of the lens and appears consistently in every image taken with that camera and lens.



Image credit: PawełS, CC BY-SA 3.0 <http://creativecommons.org/licenses/by-sa/3.0/>, via Wikimedia Commons

We treat lateral chromatic aberration as a geometric distortion of the red and blue channels relative to the reference green channel, and aim to estimate a mapping that aligns the red and blue channels to green.

The correction follows the paper of Rudakova et al. on lateral chromatic aberration. The misalignment in each channel is modeled as a polynomial of some degree. The distance between the precise locations of disc centres in the red/blue channels and the green channel is minimized by warping these centres.

The paper also proposes a calibration pattern of black discs, with many more discs than polynomial coefficients, so that the fit is well constrained. Degree 11 is often used, but smaller degrees can achieve a similar level of accuracy with much better performance.
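
Concretely, writing $a^{(c)}_{ij}$ for the fitted coefficients of channel $c \in \{R, B\}$, the calibration fits, for each image axis, a polynomial displacement $\Delta_c(x, y) = \sum_{i+j \le d} a^{(c)}_{ij}\, x^i y^j$ of degree $d$ over image coordinates (normalized to the image centre in the implementation). This gives $M=(d+1)(d+2)/2$ coefficients per axis and channel. Correction then remaps the red and blue channels by the negated fitted displacement so that they land on the green channel's geometry.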

 

Calibration
------

To create a model of the misalignments of the channels, we use the following calibration procedure:

1. Print out the calibration pattern available in [opencv_extra/testdata/cv/cameracalibration/chromatic_aberration/chromatic_aberration_pattern_a3.png](https://github.com/opencv/opencv_extra/tree/5.x/testdata/cv/cameracalibration/chromatic_aberration/chromatic_aberration_pattern_a3.png). It is a grid of black discs on a white background; since chromatic aberration fringes appear on the edges of objects, the many disc edges let us observe many different misalignments and model them precisely.

2. Take one or more images of the printed calibration grid with your camera. Make sure that all of the discs are in the photo and that the grid fills as much of the frame as possible, since chromatic aberration is strongest at the edges and corners of the image. You should be able to see color fringes by eye.

3. Run calibration, see [chromatic_calibration.py](../../../../apps/chromatic-aberration-calibration/chromatic_calibration.py). The app can be used as follows:

```
chromatic_calibration.py calibrate [-h] [--degree DEGREE] --coeffs_file YAML image
chromatic_calibration.py correct [-h] --coeffs_file YAML [-o OUTPUT] image
chromatic_calibration.py full [-h] [--degree DEGREE] --coeffs_file YAML [-o OUTPUT] image
chromatic_calibration.py scan [-h] --degree_range k0 k1 image
```
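
For example, to calibrate from a single photo of the pattern and then correct that photo (the file names here are placeholders; the first command mirrors the usage example in the script's docstring):

```
python chromatic_calibration.py calibrate pattern_aberrated.png --coeffs_file calib_result.yaml
python chromatic_calibration.py correct pattern_aberrated.png --coeffs_file calib_result.yaml -o corrected.png
```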

Calibrate estimates the polynomial coefficients and writes them to a YAML file to be used with the correction functions.

- Splits BGR, finds disk centers per channel at sub-pixel precision.
- Pairs centers to green via a KD-tree.
- Builds monomial terms up to `--degree` and solves least squares, then refines with a nonlinear optimizer (L-BFGS-B by default).
- Saves a YAML with:
  - `image_width`, `image_height`
  - `red_channel`/`blue_channel`: `coeffs_x`, `coeffs_y` (length $M=(d+1)(d+2)/2$) and `rms` residuals (see the reading sketch right after this list).
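
The saved file can be inspected with OpenCV's own `FileStorage` reader. A minimal sketch (the file name is a placeholder; the degree is recovered from the term count the same way the C++ loader does):

```python
import cv2 as cv

# Placeholder path: the YAML produced by "chromatic_calibration.py calibrate".
fs = cv.FileStorage("calib_result.yaml", cv.FileStorage_READ)
if not fs.isOpened():
    raise RuntimeError("could not open calibration file")

red = fs.getNode("red_channel")
m = int(red.getNode("coeffs_x").size())        # number of monomial terms M per axis
# M = (d+1)(d+2)/2, so the polynomial degree d can be recovered from M
d = int(round(((1 + 8 * m) ** 0.5 - 3) / 2))
print("image size:", int(fs.getNode("image_width").real()),
      "x", int(fs.getNode("image_height").real()))
print("terms per axis:", m, "-> polynomial degree:", d)
fs.release()
```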

Scan sweeps a range of polynomial degrees and compares quality. Although higher degrees should almost always model the aberration better, lower degrees can be much faster.

- Runs calibration for each degree in k0,..,k1 inclusive to fit a model per degree.
- Extracts full disk contours per channel.
- Warps R/B contours toward G using each degree's polynomials and measures nearest-neighbor distances.
- Prints a table of max / mean / std distances (in pixels) for red and blue.
- The user can then choose which degree works best and calibrate the camera with that specific degree.

Code
----

Minimal Python example for chromatic aberration correction:

```
import cv2 as cv

INPUT = "path/to/input.jpg"
CALIB_YAML = "path/to/ca_photo_calib.yaml"
OUTPUT = "corrected.png"
BAYER = -1
SHOW = True

# IMREAD_UNCHANGED keeps a single-channel raw Bayer image as-is
img = cv.imread(INPUT, cv.IMREAD_UNCHANGED)

fs = cv.FileStorage(CALIB_YAML, cv.FileStorage_READ)
coeffMat, calib_size, degree = cv.loadChromaticAberrationParams(fs.root())
corrected = cv.correctChromaticAberration(img, coeffMat, calib_size, degree, BAYER)

if SHOW:
    cv.namedWindow("Original", cv.WINDOW_AUTOSIZE)
    cv.namedWindow("Corrected", cv.WINDOW_AUTOSIZE)
    cv.imshow("Original", img)
    cv.imshow("Corrected", corrected)
    print("Press any key to close...")
    cv.waitKey(0)
    cv.destroyAllWindows()

cv.imwrite(OUTPUT, corrected)
```

Additional Resources
--------------------
@cite rudakova2013precise
@@ -18,3 +18,7 @@ denoising etc.
- @subpage tutorial_py_hdr

    Learn how to merge exposure sequence and process high dynamic range images.

- @subpage tutorial_py_chromatic_aberration

    Correct chromatic aberration in your camera's photos by calibrating the camera.
@@ -895,6 +895,69 @@ CV_EXPORTS_W void stylization(InputArray src, OutputArray dst, float sigma_s = 6

//! @} photo_render

//! @addtogroup photo_ca_correction Chromatic Aberration Correction
//! @{

/** @example samples/cpp/snippets/chromatic_aberration_correction.cpp
An example correcting chromatic aberration with C++
*/
/** @example samples/python/snippets/chromatic_aberration_correction.py
An example correcting chromatic aberration with Python
*/
/** @brief Corrects lateral chromatic aberration in an image using a polynomial distortion model.

This function applies a channel-specific warp built from the supplied polynomial calibration
coefficients to remove chromatic aberration.
If @p input_image has one channel, it is assumed to be a raw Bayer image and is
first demosaiced using @p bayer_pattern. If it has three channels, it is treated
as a BGR image and @p bayer_pattern is ignored.

Calibration must be done first, using apps/chromatic-aberration-calibration/chromatic_calibration.py on a photo of
a pattern of black discs on a white background, included in opencv_extra/testdata/cv/cameracalibration/chromatic_aberration/chromatic_aberration_pattern_a3.png

Calibration and correction are based on the algorithm described in @cite rudakova2013precise.
The chromatic aberration is modeled as a polynomial of some degree in the red and blue channels relative to green.
During calibration, a photo of many black discs on a white background is used, and the displacements
of the disc centres in the red and blue channels relative to green are minimized. The coefficients
are then saved in a YAML file which can be used with this function to correct lateral chromatic aberration.

@param input_image Input BGR image (or raw Bayer image, see @p bayer_pattern) to correct
@param coefficients 4xN CV_32F coefficient matrix, as produced by loadChromaticAberrationParams
@param output_image Corrected BGR image
@param image_size Image size for which the calibration coefficient model was computed
@param calib_degree Degree of the calibration coefficient model
@param bayer_pattern Bayer pattern code (e.g. cv::COLOR_BayerBG2BGR) used for
demosaicing when @p input_image has one channel; ignored otherwise.

@sa loadChromaticAberrationParams, demosaicing
*/
CV_EXPORTS_W void correctChromaticAberration(InputArray input_image, InputArray coefficients, OutputArray output_image,
                                             const Size& image_size, int calib_degree, int bayer_pattern = -1);

/** @brief Load chromatic-aberration calibration parameters from an opened FileStorage.
 *
 * Reads the red and blue polynomial coefficients from the specified file node and
 * packs them into a 4xN CV_32F matrix:
 *   row 0 = blue dx coefficients
 *   row 1 = blue dy coefficients
 *   row 2 = red dx coefficients
 *   row 3 = red dy coefficients
 *
 * @param node Root node of an opened cv::FileStorage object.
 * @param coeffMat Output 4xN coefficient matrix (CV_32F).
 * @param calib_size Calibration image size read from the file.
 * @param degree Polynomial degree inferred from N.
 *
 * @sa correctChromaticAberration
 */
CV_EXPORTS_W void loadChromaticAberrationParams(
        const FileNode& node,
        OutputArray coeffMat,
        CV_OUT Size& calib_size,
        CV_OUT int& degree);

//! @} photo_ca_correction

//! @} photo

} // cv

modules/photo/perf/perf_chromatic_aberration.cpp (new file, 39 lines)
@@ -0,0 +1,39 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html
|
||||
|
||||
#include "perf_precomp.hpp"
|
||||
|
||||
namespace opencv_test
|
||||
{
|
||||
namespace
|
||||
{
|
||||
|
||||
PERF_TEST(ChromaticAberration, CorrectChromaticAberration)
|
||||
{
|
||||
std::string calib_file = getDataPath("cv/cameracalibration/chromatic_aberration/ca_photo_calib.yaml");
|
||||
std::string image_file = getDataPath("cv/cameracalibration/chromatic_aberration/ca_photo.png");
|
||||
|
||||
cv::Mat src = cv::imread(image_file);
|
||||
ASSERT_FALSE(src.empty()) << "Could not load input image";
|
||||
ASSERT_EQ(src.type(), CV_8UC3);
|
||||
|
||||
cv::Mat coeffMat;
|
||||
int degree = -1;
|
||||
Size calib_size = {-1,-1};
|
||||
|
||||
FileStorage fs(calib_file, FileStorage::READ);
|
||||
ASSERT_TRUE(fs.isOpened());
|
||||
ASSERT_NO_THROW(cv::loadChromaticAberrationParams(fs.root(), coeffMat, calib_size, degree));
|
||||
|
||||
cv::Mat dst;
|
||||
|
||||
TEST_CYCLE()
|
||||
{
|
||||
cv::correctChromaticAberration(src, coeffMat, dst, calib_size, degree);
|
||||
}
|
||||
SANITY_CHECK_NOTHING();
|
||||
}
|
||||
|
||||
} // namespace
|
||||
} // namespace opencv_test
|
||||
modules/photo/src/chromatic_aberration_correction.cpp (new file, 242 lines)
@@ -0,0 +1,242 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html
|
||||
|
||||
#include "precomp.hpp"
|
||||
|
||||
namespace cv {
|
||||
|
||||
CV_EXPORTS_W void loadChromaticAberrationParams(const FileNode& fs,
|
||||
OutputArray coeffMat,
|
||||
Size& calib_size,
|
||||
int& degree)
|
||||
{
|
||||
int imgW = 0, imgH = 0;
|
||||
fs["image_width"] >> imgW;
|
||||
fs["image_height"] >> imgH;
|
||||
|
||||
CV_Assert(imgW > 0 && imgH > 0);
|
||||
|
||||
auto readChannel = [&](const char* key,
|
||||
std::vector<double>& coeffs_x,
|
||||
std::vector<double>& coeffs_y,
|
||||
int& deg_out)
|
||||
{
|
||||
FileNode ch = fs[key];
|
||||
if (ch.empty())
|
||||
CV_Error_(Error::StsParseError,
|
||||
("Missing channel \"%s\"", key));
|
||||
|
||||
ch["coeffs_x"] >> coeffs_x;
|
||||
ch["coeffs_y"] >> coeffs_y;
|
||||
|
||||
if (coeffs_x.empty() || coeffs_y.empty())
|
||||
CV_Error_(Error::StsParseError,
|
||||
("%s: coeffs_x/coeffs_y missing", key));
|
||||
|
||||
if (coeffs_x.size() != coeffs_y.size())
|
||||
CV_Error_(Error::StsBadSize,
|
||||
("%s: coeffs_x (%zu) vs coeffs_y (%zu)",
|
||||
key, coeffs_x.size(), coeffs_y.size()));
|
||||
|
||||
if (!checkRange(coeffs_x, true) || !checkRange(coeffs_y, true))
|
||||
CV_Error_(Error::StsBadArg,
|
||||
("%s: coefficient array contains NaN/Inf", key));
|
||||
size_t m = coeffs_x.size();
|
||||
double n_float = (std::sqrt(1.0 + 8.0 * m) - 3.0) / 2.0;
|
||||
int deg = static_cast<int>(std::round(n_float));
|
||||
size_t expected_m = static_cast<size_t>((deg + 1) * (deg + 2) / 2);
|
||||
if (m != expected_m){
|
||||
CV_Error_(Error::StsBadArg,
|
||||
("Coefficient count %zu is not triangular for degree %d "
|
||||
"(expected %zu)", m, deg, expected_m));
|
||||
}
|
||||
deg_out = deg;
|
||||
};
|
||||
|
||||
std::vector<double> red_x, red_y, blue_x, blue_y;
|
||||
int deg_red = 0, deg_blue = 0;
|
||||
readChannel("red_channel", red_x, red_y, deg_red);
|
||||
readChannel("blue_channel", blue_x, blue_y, deg_blue);
|
||||
|
||||
|
||||
if (red_x.size() != blue_x.size()){
|
||||
CV_Error_(Error::StsBadSize,
|
||||
("Red (%zu) and blue (%zu) coefficient counts differ",
|
||||
red_x.size(), blue_x.size()));
|
||||
}
|
||||
if (deg_red != deg_blue){
|
||||
CV_Error_(Error::StsBadArg,
|
||||
("Red (%d) and blue (%d) degrees differ",
|
||||
deg_red, deg_blue));
|
||||
}
|
||||
|
||||
const int mterms = (int)red_x.size();
|
||||
|
||||
Mat tmp(4, mterms, CV_32F);
|
||||
|
||||
float* Bx = tmp.ptr<float>(0);
|
||||
float* By = tmp.ptr<float>(1);
|
||||
float* Rx = tmp.ptr<float>(2);
|
||||
float* Ry = tmp.ptr<float>(3);
|
||||
|
||||
for (int i = 0; i < mterms; ++i) {
|
||||
Bx[i] = static_cast<float>(blue_x[i]);
|
||||
By[i] = static_cast<float>(blue_y[i]);
|
||||
Rx[i] = static_cast<float>(red_x[i]);
|
||||
Ry[i] = static_cast<float>(red_y[i]);
|
||||
}
|
||||
|
||||
calib_size = Size(imgW, imgH);
|
||||
degree = deg_red;
|
||||
tmp.copyTo(coeffMat);
|
||||
}
|
||||
|
||||
static void buildRemapsFromCoeffMat(int height, int width,
|
||||
const Mat& coeffs,
|
||||
int degree,
|
||||
int rowX, int rowY,
|
||||
Mat& map_x, Mat& map_y)
|
||||
{
|
||||
if (coeffs.type() != CV_32F) {
|
||||
CV_Error_(Error::StsUnsupportedFormat,
|
||||
("coeffs Mat must be CV_32F (got type=%d)", coeffs.type()));
|
||||
}
|
||||
|
||||
if (coeffs.rows != 4) {
|
||||
CV_Error_(Error::StsBadSize,
|
||||
("coeffs.rows must be 4 (Bx,By,Rx,Ry); got %d", coeffs.rows));
|
||||
}
|
||||
|
||||
if (rowX < 0 || rowX >= coeffs.rows) {
|
||||
CV_Error_(Error::StsOutOfRange,
|
||||
("rowX index %d out of range [0,%d)", rowX, coeffs.rows));
|
||||
}
|
||||
if (rowY < 0 || rowY >= coeffs.rows) {
|
||||
CV_Error_(Error::StsOutOfRange,
|
||||
("rowY index %d out of range [0,%d)", rowY, coeffs.rows));
|
||||
}
|
||||
|
||||
if (degree < 0) {
|
||||
CV_Error_(Error::StsBadArg,
|
||||
("degree must be non-negative; got %d", degree));
|
||||
}
|
||||
|
||||
const int expected_terms = (degree + 1) * (degree + 2) / 2;
|
||||
if (coeffs.cols != expected_terms) {
|
||||
CV_Error_(Error::StsBadSize,
|
||||
("coeffs.cols (%d) != expected polynomial term count (%d) for degree=%d",
|
||||
coeffs.cols, expected_terms, degree));
|
||||
}
|
||||
|
||||
if (width <= 0 || height <= 0) {
|
||||
CV_Error_(Error::StsBadArg,
|
||||
("width (%d) and height (%d) must be positive", width, height));
|
||||
}
|
||||
|
||||
Mat X(1, width, CV_32F), Y(height, 1, CV_32F);
|
||||
for (int i = 0; i < width; ++i) X.at<float>(0,i) = (float)i;
|
||||
for (int j = 0; j < height; ++j) Y.at<float>(j,0) = (float)j;
|
||||
|
||||
Mat Xgrid, Ygrid;
|
||||
repeat(X, height, 1, Xgrid);
|
||||
repeat(Y, 1, width, Ygrid);
|
||||
|
||||
Mat dx(height, width, CV_32F);
|
||||
Mat dy(height, width, CV_32F);
|
||||
|
||||
const double mean_x = width * 0.5;
|
||||
const double mean_y = height * 0.5;
|
||||
const double inv_std_x = 1.0 / mean_x;
|
||||
const double inv_std_y = 1.0 / mean_y;
|
||||
|
||||
const float* Cx = coeffs.ptr<float>(rowX);
|
||||
const float* Cy = coeffs.ptr<float>(rowY);
|
||||
|
||||
|
||||
parallel_for_(Range(0, height), [&](const Range& rows){
|
||||
std::vector<double> x_pow(degree + 1);
|
||||
std::vector<double> y_pow(degree + 1);
|
||||
for (int y = rows.start; y < rows.end; ++y) {
|
||||
const float* XR = Xgrid.ptr<float>(y);
|
||||
const float* YR = Ygrid.ptr<float>(y);
|
||||
float* DX = dx.ptr<float>(y);
|
||||
float* DY = dy.ptr<float>(y);
|
||||
for (int x = 0; x < width; ++x) {
|
||||
const double xn = (XR[x] - mean_x) * inv_std_x;
|
||||
const double yn = (YR[x] - mean_y) * inv_std_y;
|
||||
|
||||
x_pow[0] = y_pow[0] = 1.0;
|
||||
for (int k = 1; k <= degree; ++k) {
|
||||
x_pow[k] = x_pow[k-1] * xn;
|
||||
y_pow[k] = y_pow[k-1] * yn;
|
||||
}
|
||||
|
||||
double dxv = 0.0, dyv = 0.0;
|
||||
int idx = 0;
|
||||
for (int t = 0; t <= degree; ++t){
|
||||
for (int i = 0; i <= t; ++i){
|
||||
const int j = t - i;
|
||||
const double term = x_pow[i] * y_pow[j];
|
||||
dxv += Cx[idx] * term;
|
||||
dyv += Cy[idx] * term;
|
||||
++idx;
|
||||
}
|
||||
}
|
||||
|
||||
DX[x] = (float)dxv;
|
||||
DY[x] = (float)dyv;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
map_x = Xgrid - dx;
|
||||
map_y = Ygrid - dy;
|
||||
}
|
||||
|
||||
void correctChromaticAberration(InputArray input_image,
|
||||
InputArray coefficients,
|
||||
OutputArray output_image,
|
||||
const Size& calib_size,
|
||||
int calib_degree,
|
||||
int bayer_pattern)
|
||||
{
|
||||
Mat image = input_image.getMat();
|
||||
const Mat coeffMat = coefficients.getMat();
|
||||
|
||||
if (image.channels() == 1) {
|
||||
if (bayer_pattern < 0) {
|
||||
CV_Error_(Error::StsBadArg,
|
||||
("Single-channel input detected: must pass a valid bayer_pattern"));
|
||||
}
|
||||
Mat dem;
|
||||
demosaicing(image, dem, bayer_pattern);
|
||||
image = dem;
|
||||
}
|
||||
|
||||
const int height = image.rows;
|
||||
const int width = image.cols;
|
||||
|
||||
if (height != calib_size.height || width != calib_size.width) {
|
||||
CV_Error_(Error::StsBadArg,
|
||||
("Image size %dx%d does not match calibration %dx%d",
|
||||
width, height, calib_size.width, calib_size.height));
|
||||
}
|
||||
|
||||
std::vector<Mat> channels;
|
||||
split(image, channels);
|
||||
Mat b = channels[0], g = channels[1], r = channels[2];
|
||||
|
||||
Mat map_x_r, map_y_r, map_x_b, map_y_b;
|
||||
buildRemapsFromCoeffMat(height, width, coeffMat, calib_degree, 2, 3, map_x_r, map_y_r);
|
||||
buildRemapsFromCoeffMat(height, width, coeffMat, calib_degree, 0, 1, map_x_b, map_y_b);
|
||||
|
||||
Mat r_corr, b_corr;
|
||||
remap(r, r_corr, map_x_r, map_y_r, INTER_LINEAR, BORDER_REPLICATE);
|
||||
remap(b, b_corr, map_x_b, map_y_b, INTER_LINEAR, BORDER_REPLICATE);
|
||||
|
||||
std::vector<Mat> corrected_channels = {b_corr, g, r_corr};
merge(corrected_channels, output_image);
|
||||
}
|
||||
}
|
||||
modules/photo/test/test_chromatic_aberration.cpp (new file, 144 lines)
@@ -0,0 +1,144 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
|
||||
#include "test_precomp.hpp"
|
||||
|
||||
namespace opencv_test { namespace {
|
||||
|
||||
const unsigned long EXPECTED_COEFFS_SIZE = 78;
|
||||
|
||||
class ChromaticAberrationTest : public testing::Test
|
||||
{
|
||||
protected:
|
||||
std::string test_yaml_file;
|
||||
cv::Mat test_image;
|
||||
cv::Mat coeffMat;
|
||||
cv::Mat corrected;
|
||||
int degree = -1;
|
||||
Size calib_size = {-1, -1};
|
||||
|
||||
void SetUp() override
|
||||
{
|
||||
string data_path = cvtest::TS::ptr()->get_data_path();
|
||||
ASSERT_TRUE(!data_path.empty()) << "OPENCV_TEST_DATA_PATH not set";
|
||||
test_yaml_file = std::string(data_path) + "cameracalibration/chromatic_aberration/ca_photo_calib.yaml";
|
||||
test_image = cv::imread(std::string(data_path) + "cameracalibration/chromatic_aberration/ca_photo.png");
|
||||
ASSERT_FALSE(test_image.empty()) << "Failed to load test image";
|
||||
}
|
||||
};
|
||||
|
||||
TEST_F(ChromaticAberrationTest, LoadCalibAndCorrectImage)
|
||||
{
|
||||
FileStorage fs(test_yaml_file, FileStorage::READ);
|
||||
ASSERT_TRUE(fs.isOpened());
|
||||
ASSERT_NO_THROW(cv::loadChromaticAberrationParams(fs.root(), coeffMat, calib_size, degree));
|
||||
|
||||
ASSERT_FALSE(coeffMat.empty());
|
||||
ASSERT_EQ(coeffMat.type(), CV_32F);
|
||||
ASSERT_EQ(coeffMat.rows, 4);
|
||||
ASSERT_GT(coeffMat.cols, 0);
|
||||
ASSERT_EQ((degree + 1) * (degree + 2) / 2, coeffMat.cols);
|
||||
ASSERT_GT(calib_size.width, 0);
|
||||
ASSERT_GT(calib_size.height, 0);
|
||||
|
||||
ASSERT_EQ(test_image.cols, calib_size.width);
|
||||
ASSERT_EQ(test_image.rows, calib_size.height);
|
||||
|
||||
ASSERT_NO_THROW(cv::correctChromaticAberration(test_image, coeffMat, corrected, calib_size, degree));
|
||||
|
||||
EXPECT_EQ(corrected.size(), test_image.size());
|
||||
EXPECT_EQ(corrected.channels(), test_image.channels());
|
||||
EXPECT_EQ(corrected.type(), test_image.type());
|
||||
|
||||
cv::Mat diff; cv::absdiff(test_image, corrected, diff);
|
||||
cv::Scalar s = cv::sum(diff);
|
||||
EXPECT_GT(s[0] + s[1] + s[2], 0.0);
|
||||
}
|
||||
|
||||
TEST_F(ChromaticAberrationTest, YAMLContentsAsExpected)
|
||||
{
|
||||
cv::FileStorage fs(test_yaml_file, cv::FileStorage::READ);
|
||||
ASSERT_TRUE(fs.isOpened());
|
||||
|
||||
cv::FileNode red_node = fs["red_channel"];
|
||||
cv::FileNode blue_node = fs["blue_channel"];
|
||||
EXPECT_TRUE(red_node.isMap());
|
||||
EXPECT_TRUE(blue_node.isMap());
|
||||
|
||||
std::vector<double> coeffs_x;
|
||||
red_node["coeffs_x"] >> coeffs_x;
|
||||
EXPECT_EQ(coeffs_x.size(), EXPECTED_COEFFS_SIZE);
|
||||
blue_node["coeffs_x"] >> coeffs_x;
|
||||
EXPECT_EQ(coeffs_x.size(), EXPECTED_COEFFS_SIZE);
|
||||
|
||||
std::vector<double> coeffs_y;
|
||||
red_node["coeffs_y"] >> coeffs_y;
|
||||
EXPECT_EQ(coeffs_y.size(), EXPECTED_COEFFS_SIZE);
|
||||
blue_node["coeffs_y"] >> coeffs_y;
|
||||
EXPECT_EQ(coeffs_y.size(), EXPECTED_COEFFS_SIZE);
|
||||
|
||||
fs.release();
|
||||
}
|
||||
|
||||
TEST_F(ChromaticAberrationTest, InvalidSingleChannel)
|
||||
{
|
||||
FileStorage fs(test_yaml_file, FileStorage::READ);
|
||||
ASSERT_TRUE(fs.isOpened());
|
||||
ASSERT_NO_THROW(cv::loadChromaticAberrationParams(fs.root(), coeffMat, calib_size, degree));
|
||||
|
||||
cv::Mat gray;
|
||||
cv::cvtColor(test_image, gray, cv::COLOR_BGR2GRAY);
|
||||
|
||||
EXPECT_THROW(cv::correctChromaticAberration(gray, coeffMat, corrected, calib_size, degree),
|
||||
cv::Exception);
|
||||
}
|
||||
|
||||
TEST_F(ChromaticAberrationTest, EmptyCoeffMat)
|
||||
{
|
||||
FileStorage fs(test_yaml_file, FileStorage::READ);
|
||||
ASSERT_TRUE(fs.isOpened());
|
||||
ASSERT_NO_THROW(cv::loadChromaticAberrationParams(fs.root(), coeffMat, calib_size, degree));
|
||||
|
||||
cv::Mat emptyCoeff;
|
||||
EXPECT_THROW(cv::correctChromaticAberration(test_image, emptyCoeff, corrected, calib_size, degree),
|
||||
cv::Exception);
|
||||
}
|
||||
|
||||
TEST_F(ChromaticAberrationTest, MismatchedImageSize)
|
||||
{
|
||||
FileStorage fs(test_yaml_file, FileStorage::READ);
|
||||
ASSERT_TRUE(fs.isOpened());
|
||||
ASSERT_NO_THROW(cv::loadChromaticAberrationParams(fs.root(), coeffMat, calib_size, degree));
|
||||
|
||||
cv::Mat resized;
|
||||
cv::resize(test_image, resized, cv::Size(test_image.cols/2, test_image.rows/2));
|
||||
EXPECT_THROW(cv::correctChromaticAberration(resized, coeffMat, corrected, calib_size, degree),
|
||||
cv::Exception);
|
||||
}
|
||||
|
||||
TEST_F(ChromaticAberrationTest, WrongCoeffType)
|
||||
{
|
||||
FileStorage fs(test_yaml_file, FileStorage::READ);
|
||||
ASSERT_TRUE(fs.isOpened());
|
||||
ASSERT_NO_THROW(cv::loadChromaticAberrationParams(fs.root(), coeffMat, calib_size, degree));
|
||||
|
||||
cv::Mat wrongType;
|
||||
coeffMat.convertTo(wrongType, CV_64F);
|
||||
EXPECT_THROW(cv::correctChromaticAberration(test_image, wrongType, corrected, calib_size, degree),
|
||||
cv::Exception);
|
||||
}
|
||||
|
||||
TEST_F(ChromaticAberrationTest, DegreeDoesNotMatchCoeffCols)
|
||||
{
|
||||
FileStorage fs(test_yaml_file, FileStorage::READ);
|
||||
ASSERT_TRUE(fs.isOpened());
|
||||
ASSERT_NO_THROW(cv::loadChromaticAberrationParams(fs.root(), coeffMat, calib_size, degree));
|
||||
|
||||
int wrongDegree = std::max(1, degree - 1);
|
||||
ASSERT_NE((wrongDegree + 1) * (wrongDegree + 2) / 2, coeffMat.cols);
|
||||
EXPECT_THROW(cv::correctChromaticAberration(test_image, coeffMat, corrected, calib_size, wrongDegree),
|
||||
cv::Exception);
|
||||
}
|
||||
|
||||
}}
|
||||
modules/python/test/test_chromatic_aberration.py (new file, 127 lines)
@@ -0,0 +1,127 @@
|
||||
#!/usr/bin/env python3
|
||||
# This file is part of OpenCV project.
|
||||
# It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
# of this distribution and at http://opencv.org/license.html.
|
||||
|
||||
import numpy as np
|
||||
import cv2 as cv
|
||||
from tests_common import NewOpenCVTests
|
||||
|
||||
EXPECTED_COEFFS_SIZE = 78

class ChromaticAberrationTest(NewOpenCVTests):
    def setUp(self):
        super().setUp()

        self.test_yaml_file = self.find_file(
            "cv/cameracalibration/chromatic_aberration/ca_photo_calib.yaml"
        )

        self.test_image = self.get_sample(
            "cv/cameracalibration/chromatic_aberration/ca_photo.png", 1
        )
        self.assertIsNotNone(self.test_image, "Failed to load test image")
        self.assertFalse(self.test_image.size == 0, "Failed to load test image")

    def test_load_calib_and_correct_image(self):
        fs = cv.FileStorage(self.test_yaml_file, cv.FileStorage_READ)
        self.assertTrue(fs.isOpened())
        coeffMat, calib_size, degree = cv.loadChromaticAberrationParams(fs.root())

        self.assertIsInstance(coeffMat, np.ndarray)
        self.assertEqual(coeffMat.dtype, np.float32)
        self.assertEqual(coeffMat.shape[0], 4)
        self.assertGreater(coeffMat.shape[1], 0)
        self.assertGreater(degree, 0)
        self.assertGreater(calib_size[0], 0)
        self.assertGreater(calib_size[1], 0)
        self.assertEqual(coeffMat.shape[1], EXPECTED_COEFFS_SIZE)

        self.assertEqual(self.test_image.shape[1], calib_size[0])
        self.assertEqual(self.test_image.shape[0], calib_size[1])

        corrected = cv.correctChromaticAberration(self.test_image, coeffMat, calib_size, degree)

        self.assertEqual(corrected.shape[:2], self.test_image.shape[:2])
        self.assertEqual(corrected.dtype, self.test_image.dtype)

        diff = cv.absdiff(self.test_image, corrected)
        sum_diff = cv.sumElems(diff)
        self.assertGreater(sum(sum_diff[:3]), 0.0)

    def test_yaml_contents_as_expected(self):
        fs = cv.FileStorage(self.test_yaml_file, cv.FileStorage_READ)
        self.assertTrue(fs.isOpened())

        red_node = fs.getNode("red_channel")
        blue_node = fs.getNode("blue_channel")
        self.assertTrue(red_node.isMap())
        self.assertTrue(blue_node.isMap())

        coeffs_x = red_node.getNode("coeffs_x")
        self.assertIsNotNone(coeffs_x)
        self.assertEqual(coeffs_x.size(), EXPECTED_COEFFS_SIZE)

        coeffs_x = blue_node.getNode("coeffs_x")
        self.assertIsNotNone(coeffs_x)
        self.assertEqual(coeffs_x.size(), EXPECTED_COEFFS_SIZE)

        coeffs_y = red_node.getNode("coeffs_y")
        self.assertIsNotNone(coeffs_y)
        self.assertEqual(coeffs_y.size(), EXPECTED_COEFFS_SIZE)

        coeffs_y = blue_node.getNode("coeffs_y")
        self.assertIsNotNone(coeffs_y)
        self.assertEqual(coeffs_y.size(), EXPECTED_COEFFS_SIZE)

        fs.release()

    def test_invalid_single_channel(self):
        fs = cv.FileStorage(self.test_yaml_file, cv.FileStorage_READ)
        self.assertTrue(fs.isOpened())
        coeffMat, calib_size, degree = cv.loadChromaticAberrationParams(fs.root())

        gray = cv.cvtColor(self.test_image, cv.COLOR_BGR2GRAY)
        with self.assertRaises(cv.error):
            _ = cv.correctChromaticAberration(gray, coeffMat, calib_size, degree)

    def test_empty_coeff_mat(self):
        fs = cv.FileStorage(self.test_yaml_file, cv.FileStorage_READ)
        self.assertTrue(fs.isOpened())
        _, calib_size, degree = cv.loadChromaticAberrationParams(fs.root())

        emptyCoeff = np.empty((0, 0), dtype=np.float32)
        with self.assertRaises(cv.error):
            _ = cv.correctChromaticAberration(self.test_image, emptyCoeff, calib_size, degree)

    def test_mismatched_image_size(self):
        fs = cv.FileStorage(self.test_yaml_file, cv.FileStorage_READ)
        self.assertTrue(fs.isOpened())
        coeffMat, calib_size, degree = cv.loadChromaticAberrationParams(fs.root())

        resized = cv.resize(self.test_image, (self.test_image.shape[1] // 2, self.test_image.shape[0] // 2))
        with self.assertRaises(cv.error):
            _ = cv.correctChromaticAberration(resized, coeffMat, calib_size, degree)

    def test_wrong_coeff_type(self):
        fs = cv.FileStorage(self.test_yaml_file, cv.FileStorage_READ)
        self.assertTrue(fs.isOpened())
        coeffMat, calib_size, degree = cv.loadChromaticAberrationParams(fs.root())

        wrongType = coeffMat.astype(np.float64)
        with self.assertRaises(cv.error):
            _ = cv.correctChromaticAberration(self.test_image, wrongType, calib_size, degree)

    def test_degree_does_not_match_coeff_cols(self):
        fs = cv.FileStorage(self.test_yaml_file, cv.FileStorage_READ)
        self.assertTrue(fs.isOpened())
        coeffMat, calib_size, degree = cv.loadChromaticAberrationParams(fs.root())

        wrongDegree = max(1, degree - 1)
        self.assertNotEqual((wrongDegree + 1) * (wrongDegree + 2) // 2, coeffMat.shape[1])
        with self.assertRaises(cv.error):
            _ = cv.correctChromaticAberration(self.test_image, coeffMat, calib_size, wrongDegree)

if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
115
samples/cpp/snippets/chromatic_aberration_correction.cpp
Normal file
@@ -0,0 +1,115 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html

#include "opencv2/core.hpp"
#include "opencv2/photo.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>

using namespace cv;
using namespace std;

static const char* usage =
    "Chromatic Aberration Correction Sample\n"
    "Usage:\n"
    "  ca_correction <input_image> <calibration_file> [--bayer=<code>] [--output=<path>]\n"
    "\n"
    "Arguments:\n"
    "  input_image       Path to the input image. Can be:\n"
    "                      • a 3-channel BGR image, or\n"
    "                      • a 1-channel raw Bayer image (see --bayer)\n"
    "  calibration_file  OpenCV YAML/XML file with chromatic aberration calibration:\n"
    "                      image_width, image_height, red_channel/coeffs_x, coeffs_y,\n"
    "                      blue_channel/coeffs_x, coeffs_y.\n"
    "  --output=<path>   (optional) Path to save the corrected image. Default: corrected.png\n"
    "  --bayer=<code>    (optional) integer code for demosaicing a 1-channel raw image:\n"
    "                      cv::COLOR_BayerBG2BGR = 46\n"
    "                      cv::COLOR_BayerGB2BGR = 47\n"
    "                      cv::COLOR_BayerRG2BGR = 48\n"
    "                      cv::COLOR_BayerGR2BGR = 49\n"
    "                      If omitted or <0, input is assumed 3-channel BGR.\n"
    "\n"
    "Example:\n"
    "  ca_correction input.png calib.yaml --bayer=46 --output=corrected.png\n"
    "\n";

int main(int argc, char** argv)
{
    const string keys =
        "{help h        |             | show this help message }"
        "{@input        |             | input image (BGR or Bayer)}"
        "{@calibration  |             | calibration file (YAML/XML) }"
        "{output        |corrected.png| output image file }"
        "{bayer         |-1           | Bayer pattern code for demosaic }"
        ;

    CommandLineParser parser(argc, argv, keys);
    parser.about("Chromatic Aberration Correction Sample");
    if (parser.has("help") || argc < 3)
    {
        cout << usage << "\n";
        return 0;
    }

    string inputPath = parser.get<string>("@input");
    string calibPath = parser.get<string>("@calibration");
    string outputPath = parser.get<string>("output");
    int bayerPattern = parser.get<int>("bayer");

    if (!parser.check())
    {
        parser.printErrors();
        return 1;
    }

    Mat input = imread(inputPath, IMREAD_UNCHANGED);
    if (input.empty())
    {
        cerr << "ERROR: Could not load input image: " << inputPath << endl;
        return 1;
    }

    FileStorage fs(calibPath, FileStorage::READ);
    if (!fs.isOpened())
    {
        cerr << "ERROR: Could not load coefficients file: " << calibPath << endl;
        return 1;
    }

    try
    {
        Mat coeffMat;
        Size calibSize = {-1, -1};
        int degree = -1;

        cv::loadChromaticAberrationParams(fs.root(), coeffMat, calibSize, degree);

        Mat corrected;
        correctChromaticAberration(input, coeffMat, corrected, calibSize, degree, bayerPattern);

        namedWindow("Original", WINDOW_AUTOSIZE);
        namedWindow("Corrected", WINDOW_AUTOSIZE);
        imshow("Original", input);
        imshow("Corrected", corrected);
        cout << "Press any key to continue..." << endl;
        waitKey();

        if (!imwrite(outputPath, corrected))
        {
            cerr << "WARNING: Could not write output image: " << outputPath << endl;
        }
        else
        {
            cout << "Saved corrected image to: " << outputPath << endl;
        }
    }
    catch (const Exception& e)
    {
        cerr << "OpenCV error: " << e.what() << endl;
        return 1;
    }

    return 0;
}
83
samples/python/snippets/chromatic_aberration_correction.py
Executable file
@@ -0,0 +1,83 @@
#!/usr/bin/env python3
# This file is part of OpenCV project.
# It is subject to the license terms in the LICENSE file found in the top-level directory
# of this distribution and at http://opencv.org/license.html

import argparse
import sys
import cv2 as cv

USAGE = """\
Chromatic Aberration Correction Sample
Usage:
  chromatic_aberration_correction.py <input_image> <calibration_file> [--bayer <code>] [--output <path>]

Arguments:
  input_image       Path to the input image. Can be:
                      • a 3-channel BGR image, or
                      • a 1-channel raw Bayer image (see --bayer)
  calibration_file  OpenCV YAML/XML file with chromatic aberration calibration:
                      image_width, image_height, red_channel/coeffs_x, coeffs_y,
                      blue_channel/coeffs_x, coeffs_y.
  --output          (optional) Path to save the corrected image. Default: corrected.png
  --bayer           (optional) integer code for demosaicing a 1-channel raw image.
                      If omitted or <0, input is assumed 3-channel BGR.

Example:
  python chromatic_aberration_correction.py input.png calib.yaml --bayer 46 --output corrected.png
"""

def main(argv=None):
    parser = argparse.ArgumentParser(
        description="Chromatic Aberration Correction Sample",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=USAGE
    )
    parser.add_argument("input", help="Input image (BGR or Bayer)")
    parser.add_argument("calibration", help="Calibration file (YAML/XML)")
    parser.add_argument("--output", default="corrected.png", help="Output image file")
    parser.add_argument("--bayer", type=int, default=-1, help="Bayer pattern code for demosaic")
    parser.add_argument("--no-gui", action="store_true", help="Do not open image windows")

    args = parser.parse_args(argv)

    img = cv.imread(args.input, cv.IMREAD_UNCHANGED)
    if img is None:
        print(f"ERROR: Could not load input image: {args.input}", file=sys.stderr)
        return 1

    fs = cv.FileStorage(args.calibration, cv.FileStorage_READ)
    if not fs.isOpened():
        print(f"ERROR: Could not load calibration coefficients from: {args.calibration}", file=sys.stderr)
        return 1

    try:
        coeffMat, size, degree = cv.loadChromaticAberrationParams(fs.root())
        corrected = cv.correctChromaticAberration(img, coeffMat, size, degree, args.bayer)

        if corrected is None:
            print("ERROR: cv.correctChromaticAberration returned None", file=sys.stderr)
            return 1

        if not args.no_gui:
            cv.namedWindow("Original", cv.WINDOW_AUTOSIZE)
            cv.namedWindow("Corrected", cv.WINDOW_AUTOSIZE)
            cv.imshow("Original", img)
            cv.imshow("Corrected", corrected)
            print("Press any key to continue...")
            cv.waitKey(0)
            cv.destroyAllWindows()

        if not cv.imwrite(args.output, corrected):
            print(f"WARNING: Could not write output image: {args.output}", file=sys.stderr)
        else:
            print(f"Saved corrected image to: {args.output}")

    except cv.error as e:
        print(f"OpenCV error: {e}", file=sys.stderr)
        return 1

    return 0

if __name__ == "__main__":
    sys.exit(main())
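
For readers who want a feel for what the correction does, the calibration appears to store, for each affected channel (red and blue) and each axis, the coefficients of a 2-D polynomial that maps pixel position to a sub-pixel displacement relative to the green reference channel. The toy sketch below shows one way such a polynomial displacement field can be evaluated and applied with cv.remap; the coordinate normalization, monomial ordering, and interpolation choice are assumptions made for illustration and are not taken from the actual correctChromaticAberration implementation.

import numpy as np
import cv2 as cv

def apply_poly_shift(channel, coeffs_x, coeffs_y, degree):
    """Warp a single channel by a 2-D polynomial displacement field (illustration only)."""
    h, w = channel.shape
    # Normalized coordinates in [-1, 1]; a real calibration defines its own normalization.
    xs, ys = np.meshgrid(np.linspace(-1.0, 1.0, w), np.linspace(-1.0, 1.0, h))
    # All monomials x**i * y**j with i + j <= degree: (degree + 1) * (degree + 2) // 2 of them.
    basis = np.stack([xs**i * ys**j
                      for i in range(degree + 1)
                      for j in range(degree + 1 - i)], axis=-1).astype(np.float32)
    dx = basis @ np.asarray(coeffs_x, dtype=np.float32)   # per-pixel shift along x
    dy = basis @ np.asarray(coeffs_y, dtype=np.float32)   # per-pixel shift along y
    map_x = np.arange(w, dtype=np.float32)[None, :] + dx
    map_y = np.arange(h, dtype=np.float32)[:, None] + dy
    return cv.remap(channel, map_x, map_y, cv.INTER_LINEAR)

if __name__ == "__main__":
    # Smoke test with all-zero coefficients: the displacement field is zero, so the warp
    # is (effectively) the identity and the output keeps the input shape and dtype.
    degree = 3
    n = (degree + 1) * (degree + 2) // 2
    channel = np.random.randint(0, 256, (120, 160), dtype=np.uint8)
    out = apply_poly_shift(channel, np.zeros(n, np.float32), np.zeros(n, np.float32), degree)
    assert out.shape == channel.shape and out.dtype == channel.dtype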