diff --git a/src/kikuchipy/_utils/_detector_coordinates.py b/src/kikuchipy/_utils/_detector_coordinates.py
new file mode 100644
index 00000000..174f5a73
--- /dev/null
+++ b/src/kikuchipy/_utils/_detector_coordinates.py
@@ -0,0 +1,357 @@
+# Copyright 2019-2024 The kikuchipy developers
+#
+# This file is part of kikuchipy.
+#
+# kikuchipy is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# kikuchipy is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with kikuchipy. If not, see <https://www.gnu.org/licenses/>.
+
+"""Functions for converting between pixel and gnomonic detector coordinates."""
+
+from typing import Union
+
+import numpy as np
+
+
+def get_coordinate_conversions(gnomonic_bounds: np.ndarray, bounds: np.ndarray) -> dict:
+ """
+ Get factors for converting between pixel and gnomonic coordinates.
+
+ Return a dict 'conversions' containing the keys
+ "pix_to_gn", containing factors for converting
+ pixel to gnomonic coordinates, and "gn_to_pix",
+ containing factors for converting gnomonic to pixel
+ coordinates.
+ Under each of these keys is a further dict with the
+ keys: "m_x", "c_x", "m_y" and "c_y". These are the
+ slope (m) and y-intercept (c) corresponding to
+ y = mx + c, which describes the linear conversion
+ of the coordinates. A (different) linear relationship
+    is required for x (column) and y (row) coordinates,
+ hence the two sets of m and c parameters.
+
+ Parameters
+ ----------
+ gnomonic_bounds
+ Array of shape at least (4,) containing the
+ gnomonic bounds of the EBSD detector screen.
+ Typically obtained as the "gnomonic_bounds"
+ property of an EBSDDetector.
+ bounds
+ Array of four ints giving the detector bounds
+ [x0, x1, y0, y1] in pixel coordinates. Typically
+ obtained from the "bounds" property of an
+ EBSDDetector.
+
+ Returns
+ -------
+ conversions
+ Contains the keys "pix_to_gn", containing factors
+ for converting pixel to gnomonic coordinates, and
+ "gn_to_pix", containing factors for converting
+ gnomonic to pixel coordinates.
+ Under each of these keys is a further dict with the
+ keys: "m_x", "c_x", "m_y" and "c_y". These are the
+ slope (m) and y-intercept (c) corresponding to
+ y = mx + c, which describes the linear conversion
+ of the coordinates. A (different) linear relationship
+        is required for x (column) and y (row) coordinates,
+ hence the two sets of m and c parameters.
+
+ Examples
+ --------
+ Create an EBSD detector and get the coordinate conversion factors.
+
+ >>> import numpy as np
+ >>> import kikuchipy as kp
+ >>> from kikuchipy._utils._detector_coordinates import get_coordinate_conversions
+ >>> det = kp.detectors.EBSDDetector(
+ ... shape=(60, 60),
+ ... pc=np.ones((10, 20, 3)) * (0.421, 0.779, 0.505),
+ ... convention="edax",
+ ... px_size=70,
+ ... binning=8,
+ ... tilt=5,
+ ... sample_tilt=70,
+ ... )
+ >>> det
+ EBSDDetector(shape=(60, 60), pc=(0.421, 0.221, 0.505), sample_tilt=70.0, tilt=5.0, azimuthal=0.0, twist=0.0, binning=8.0, px_size=70.0 um)
+ >>> det.navigation_shape
+ (10, 20)
+ >>> det.bounds
+ array([ 0, 59, 0, 59])
+ >>> det.gnomonic_bounds[0, 0]
+ array([-0.83366337, 1.14653465, -1.54257426, 0.43762376])
+ >>> conversions = get_coordinate_conversions(det.gnomonic_bounds, det.bounds)
+ >>> conversions["pix_to_gn"]["m_x"].shape
+ (10, 20)
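+
+    The factors can then be applied manually as ``y = m*x + c`` (a
+    minimal sketch of what :func:`convert_coordinates` wraps; the pixel
+    values below are arbitrary):
+
+    >>> x_pix, y_pix = 30.0, 12.0
+    >>> p2g = conversions["pix_to_gn"]
+    >>> x_gn = p2g["m_x"][0, 0] * x_pix + p2g["c_x"][0, 0]
+    >>> y_gn = p2g["m_y"][0, 0] * y_pix + p2g["c_y"][0, 0]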
+ """
+ gnomonic_bounds = np.atleast_2d(gnomonic_bounds)
+
+ m_pix_to_gn_x = (gnomonic_bounds[..., 1] - gnomonic_bounds[..., 0]) / (
+ bounds[1] + 1
+ )
+ c_pix_to_gn_x = gnomonic_bounds[..., 0]
+
+ m_pix_to_gn_y = (gnomonic_bounds[..., 2] - gnomonic_bounds[..., 3]) / (
+ bounds[3] + 1
+ )
+ c_pix_to_gn_y = gnomonic_bounds[..., 3]
+
+ m_gn_to_pix_x = 1 / m_pix_to_gn_x
+ c_gn_to_pix_x = -c_pix_to_gn_x / m_pix_to_gn_x
+
+ m_gn_to_pix_y = 1 / m_pix_to_gn_y
+ c_gn_to_pix_y = -c_pix_to_gn_y / m_pix_to_gn_y
+
+ conversions = {
+ "pix_to_gn": {
+ "m_x": m_pix_to_gn_x,
+ "c_x": c_pix_to_gn_x,
+ "m_y": m_pix_to_gn_y,
+ "c_y": c_pix_to_gn_y,
+ },
+ "gn_to_pix": {
+ "m_x": m_gn_to_pix_x,
+ "c_x": c_gn_to_pix_x,
+ "m_y": m_gn_to_pix_y,
+ "c_y": c_gn_to_pix_y,
+ },
+ }
+
+ return conversions
+
+
+def convert_coordinates(
+ coords: np.ndarray,
+ direction: str,
+ conversions: dict,
+ detector_index: Union[None, tuple, int] = None,
+) -> np.ndarray:
+ """
+ Convert between gnomonic and pixel coordinates.
+
+ Parameters
+ ----------
+ coords
+        An array of coordinates of any shape, with the x and
+        y coordinates to be converted stored in the last axis.
+ direction
+ Either "pix_to_gn" or "gn_to_pix", depending on the
+ direction of conversion needed.
+ conversions
+ Dict containing the conversion parameters. Usually
+ the output of get_coordinate_conversions().
+ Contains the keys "pix_to_gn", containing factors
+ for converting pixel to gnomonic coordinates, and
+ "gn_to_pix", containing factors for converting
+ gnomonic to pixel coordinates.
+ Under each of these keys is a further dict with the
+ keys: "m_x", "c_x", "m_y" and "c_y". These are the
+ slope (m) and y-intercept (c) corresponding to
+ y = mx + c, which describes the linear conversion
+ of the coordinates. A (different) linear relationship
+        is required for x (column) and y (row) coordinates,
+ hence the two sets of m and c parameters.
+ The shape of each array of conversion factors
+ typically corresponds to the navigation shape
+ of an EBSDDetector.
+ detector_index
+ Index showing which conversion factors in *conversions[direction]*
+ should be applied to *coords*.
+ If None, **all** conversion factors in *conversions[direction]*
+ are applied to *coords*.
+ If an int is supplied, this refers to an index in a 1D dataset.
+ A 1D tuple *e.g.* (3,) can also be passed for a 1D dataset.
+ A 2D index can be specified by supplying a tuple *e.g.* (2, 3).
+ The default value is None.
+
+ Returns
+ -------
+ coords_out
+        Array of coordinates with values converted as specified
+        by *direction*. The shape is either the same as the input
+        or the navigation shape followed by the shape of the input.
+
+ Examples
+ --------
+
+ Convert 300 xy coordinates for all patterns in a dataset.
+
+ >>> import numpy as np
+ >>> import kikuchipy as kp
+ >>> from kikuchipy._utils._detector_coordinates import (get_coordinate_conversions, convert_coordinates)
+ >>> s = kp.data.nickel_ebsd_small()
+ >>> det = s.detector
+ >>> det.navigation_shape
+ (3, 3)
+ >>> coords_2d = np.random.randint(0, 60, (300, 2))
+ >>> coords_2d.shape
+ (300, 2)
+ >>> conv = get_coordinate_conversions(det.gnomonic_bounds, det.bounds)
+ >>> coords_out = convert_coordinates(coords_2d, "pix_to_gn", conv, None)
+ >>> coords_out.shape
+ (3, 3, 300, 2)
+
+ Convert 300 xy coordinates for the pattern at index (1, 2) in a dataset.
+
+ >>> import numpy as np
+ >>> import kikuchipy as kp
+ >>> from kikuchipy._utils._detector_coordinates import (get_coordinate_conversions, convert_coordinates)
+ >>> s = kp.data.nickel_ebsd_small()
+ >>> det = s.detector
+ >>> det.navigation_shape
+ (3, 3)
+ >>> coords_2d = np.random.randint(0, 60, (300, 2))
+ >>> coords_2d.shape
+ (300, 2)
+ >>> conv = get_coordinate_conversions(det.gnomonic_bounds, det.bounds)
+ >>> coords_out = convert_coordinates(coords_2d, "pix_to_gn", conv, (1, 2))
+ >>> coords_out.shape
+ (300, 2)
+
+ Convert 17 sets of 300 xy coordinates, different for each pattern in a dataset.
+
+ >>> import numpy as np
+ >>> import kikuchipy as kp
+ >>> from kikuchipy._utils._detector_coordinates import (get_coordinate_conversions, convert_coordinates)
+ >>> s = kp.data.nickel_ebsd_small()
+ >>> det = s.detector
+ >>> det.navigation_shape
+ (3, 3)
+ >>> coords_2d = np.random.randint(0, 60, (3, 3, 17, 300, 2))
+ >>> coords_2d.shape
+ (3, 3, 17, 300, 2)
+ >>> conv = get_coordinate_conversions(det.gnomonic_bounds, det.bounds)
+ >>> coords_out = convert_coordinates(coords_2d, "pix_to_gn", conv, None)
+ >>> coords_out.shape
+ (3, 3, 17, 300, 2)
+
+    Convert 17 sets of 300 xy coordinates, the same for all patterns in the dataset.
+
+ >>> import numpy as np
+ >>> import kikuchipy as kp
+ >>> from kikuchipy._utils._detector_coordinates import (get_coordinate_conversions, convert_coordinates)
+ >>> s = kp.data.nickel_ebsd_small()
+ >>> det = s.detector
+ >>> det.navigation_shape
+ (3, 3)
+ >>> coords_2d = np.random.randint(0, 60, (17, 300, 2))
+ >>> coords_2d.shape
+ (17, 300, 2)
+ >>> conv = get_coordinate_conversions(det.gnomonic_bounds, det.bounds)
+ >>> coords_out = convert_coordinates(coords_2d, "pix_to_gn", conv, None)
+ >>> coords_out.shape
+ (3, 3, 17, 300, 2)
+ """
+ coords = np.atleast_2d(coords)
+
+ nav_shape = conversions[direction]["m_x"].shape
+ nav_ndim = len(nav_shape)
+
+    if detector_index is None:
+ detector_index = ()
+ if coords.ndim >= nav_ndim + 2 and coords.shape[:nav_ndim] == nav_shape:
+ # one or more sets of coords, different for each image
+ out_shape = coords.shape
+ else:
+ # one or more sets of coords, the same for each image
+ out_shape = nav_shape + coords.shape
+
+ extra_axes = list(range(nav_ndim, len(out_shape) - 1))
+
+ coords_out = _convert_coordinates(
+ coords,
+ out_shape,
+ detector_index,
+ np.expand_dims(conversions[direction]["m_x"], extra_axes),
+ np.expand_dims(conversions[direction]["c_x"], extra_axes),
+ np.expand_dims(conversions[direction]["m_y"], extra_axes),
+ np.expand_dims(conversions[direction]["c_y"], extra_axes),
+ )
+
+ else:
+ if isinstance(detector_index, int):
+ detector_index = tuple([detector_index])
+
+ out_shape = coords.shape
+
+ coords_out = _convert_coordinates(
+ coords,
+ out_shape,
+ detector_index,
+ conversions[direction]["m_x"],
+ conversions[direction]["c_x"],
+ conversions[direction]["m_y"],
+ conversions[direction]["c_y"],
+ )
+
+ return coords_out
+
+
+def _convert_coordinates(
+ coords: np.ndarray,
+ out_shape: tuple,
+ detector_index: tuple,
+ m_x: Union[np.ndarray, float],
+ c_x: Union[np.ndarray, float],
+ m_y: Union[np.ndarray, float],
+ c_y: Union[np.ndarray, float],
+) -> np.ndarray:
+ """
+    Return converted coordinates depending on the arguments.
+
+ This function is usually called by convert_coordinates().
+
+    Parameters
+    ----------
+    coords
+        An array of coordinates in which the x and y coordinates
+        to be converted are stored in the last axis.
+ out_shape
+ Tuple of ints giving the output shape.
+ detector_index
+        Tuple giving the detector index.
+ m_x
+ Conversion factor m for x coordinate.
+ c_x
+ Conversion factor c for x coordinate.
+ m_y
+ Conversion factor m for y coordinate.
+ c_y
+ Conversion factor c for y coordinate.
+
+ Returns
+ -------
+    coords_out
+        Array of coordinates with shape ``out_shape`` and
+        converted values.
+ """
+ coords_out = np.zeros(out_shape, dtype=float)
+
+ coords_out[..., 0] = m_x[detector_index] * coords[..., 0] + c_x[detector_index]
+ coords_out[..., 1] = m_y[detector_index] * coords[..., 1] + c_y[detector_index]
+
+ return coords_out
diff --git a/src/kikuchipy/_utils/_gnonomic_bounds.py b/src/kikuchipy/_utils/_gnonomic_bounds.py
new file mode 100644
index 00000000..941544ab
--- /dev/null
+++ b/src/kikuchipy/_utils/_gnonomic_bounds.py
@@ -0,0 +1,63 @@
+# Copyright 2019-2024 The kikuchipy developers
+#
+# This file is part of kikuchipy.
+#
+# kikuchipy is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# kikuchipy is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with kikuchipy. If not, see <https://www.gnu.org/licenses/>.
+
+"""Function to calculate gnomonic bounds of an EBSDDetector"""
+
+import numpy as np
+
+
+def get_gnomonic_bounds(
+ nrows: int, ncols: int, pcx: float, pcy: float, pcz: float
+) -> np.ndarray:
+ """
+ Get a 1D array of gnomonic bounds for a single PC.
+
+    This function is used by the objective functions
+ for refining orientations and PCs.
+
+ Parameters
+ ----------
+    nrows
+        Number of rows on the EBSD pattern.
+        Same as EBSDDetector.nrows.
+    ncols
+        Number of columns on the EBSD pattern.
+        Same as EBSDDetector.ncols.
+    pcx
+        The x-coordinate of the pattern centre.
+        Same as EBSDDetector.pcx.
+    pcy
+        The y-coordinate of the pattern centre.
+        Same as EBSDDetector.pcy.
+    pcz
+        The z-coordinate of the pattern centre.
+        Same as EBSDDetector.pcz.
+
+ Returns
+ -------
+ gnomonic_bounds
+ Array of the gnomonic bounds
+ [x_min, x_max, y_min, y_max].
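+
+    Examples
+    --------
+    A minimal illustrative call, assuming a square 60 x 60 pattern
+    and an arbitrary PC of (0.4, 0.3, 0.5):
+
+    >>> from kikuchipy._utils._gnonomic_bounds import get_gnomonic_bounds
+    >>> get_gnomonic_bounds(60, 60, 0.4, 0.3, 0.5)
+    array([-0.8,  1.2, -1.4,  0.6])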
+ """
+ aspect_ratio = ncols / nrows
+ x_min = -aspect_ratio * (pcx / pcz)
+ x_max = aspect_ratio * (1 - pcx) / pcz
+ y_min = -(1 - pcy) / pcz
+ y_max = pcy / pcz
+ gnomonic_bounds = np.array([x_min, x_max, y_min, y_max])
+
+ return gnomonic_bounds
diff --git a/src/kikuchipy/detectors/ebsd_detector.py b/src/kikuchipy/detectors/ebsd_detector.py
index 6d9a1a60..0e8aeea8 100644
--- a/src/kikuchipy/detectors/ebsd_detector.py
+++ b/src/kikuchipy/detectors/ebsd_detector.py
@@ -22,7 +22,7 @@
import logging
from pathlib import Path
import re
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Union
from diffsims.crystallography import ReciprocalLatticeVector
from matplotlib.figure import Figure
@@ -39,6 +39,10 @@
from sklearn.linear_model import LinearRegression, RANSACRegressor
from kikuchipy import __version__
+from kikuchipy._utils._detector_coordinates import (
+ convert_coordinates,
+ get_coordinate_conversions,
+)
from kikuchipy.indexing._hough_indexing import _get_indexer_from_detector
if TYPE_CHECKING: # pragma: no cover
@@ -90,6 +94,9 @@ class EBSDDetector:
Sample tilt about the sample RD (downwards) axis. A positive
angle means the sample normal moves towards the right looking
from the sample to the detector. Default is 0.
+ twist
+ Rotation angle of the detector about the normal to
+ the detector screen, Zd. Default is 0.
sample_tilt
Sample tilt from horizontal in degrees. Default is 70.
pc
@@ -182,7 +189,7 @@ class EBSDDetector:
... sample_tilt=70,
... )
>>> det
- EBSDDetector(shape=(60, 60), pc=(0.421, 0.221, 0.505), sample_tilt=70.0, tilt=5.0, azimuthal=0.0, binning=8.0, px_size=70.0 um)
+ EBSDDetector(shape=(60, 60), pc=(0.421, 0.221, 0.505), sample_tilt=70.0, tilt=5.0, azimuthal=0.0, twist=0.0, binning=8.0, px_size=70.0 um)
>>> det.navigation_shape
(10, 20)
>>> det.bounds
@@ -204,20 +211,23 @@ def __init__(
binning: int = 1,
tilt: float = 0.0,
azimuthal: float = 0.0,
+ twist: float = 0.0,
sample_tilt: float = 70.0,
pc: np.ndarray | list | tuple = (0.5, 0.5, 0.5),
convention: str | None = None,
) -> None:
"""Create an EBSD detector with a shape, pixel size, binning
factor, sample and detector tilt about the detector X axis,
- azimuthal tilt about the detector Y axis and one or more
- projection/pattern centers (PCs).
+ azimuthal tilt about the detector Y axis, twist about the
+ detector Z axis and one or more projection/pattern
+ centers (PCs).
"""
self.shape = shape
self.px_size = float(px_size)
- self.binning: float = float(binning)
+ self.binning = float(binning)
self.tilt = float(tilt)
self.azimuthal = float(azimuthal)
+ self.twist = float(twist)
self.sample_tilt = float(sample_tilt)
self.pc = pc
if convention is None:
@@ -231,6 +241,7 @@ def __repr__(self) -> str:
sample_tilt = np.round(self.sample_tilt, decimals)
tilt = np.round(self.tilt, decimals)
azimuthal = np.round(self.azimuthal, decimals)
+ twist = np.round(self.twist, decimals)
px_size = np.round(self.px_size, decimals)
return (
f"{type(self).__name__}"
@@ -239,10 +250,16 @@ def __repr__(self) -> str:
f"sample_tilt={sample_tilt}, "
f"tilt={tilt}, "
f"azimuthal={azimuthal}, "
+ f"twist={twist}, "
f"binning={self.binning}, "
f"px_size={px_size} um)"
)
+ @property
+ def euler(self) -> np.ndarray:
+ """Return the detector Euler angles (Bunge convention: ZXZ) in degrees."""
+ return np.array([self.azimuthal, 90.0 + self.tilt, self.twist], dtype=float)
+
@property
def specimen_scintillator_distance(self) -> np.ndarray:
"""Return the specimen to scintillator distance, known in EMsoft
@@ -495,6 +512,60 @@ def r_max(self) -> np.ndarray:
corners[..., 3] = self.x_min**2 + self.y_min**2 # Lo. left
return np.atleast_2d(np.sqrt(np.max(corners, axis=-1)))
+ @property
+ def coordinate_conversion_factors(self) -> dict:
+ """Return factors for converting coordinates on the detector
+ from pixel units to gnomonic units or vice versa.
+
+ The dict returned contains the keys "pix_to_gn",
+ containing factors for converting pixel to gnomonic
+ coordinates, and "gn_to_pix", containing factors for
+ converting gnomonic to pixel coordinates.
+ Under each of these keys is a further dict with the
+ keys: "m_x", "c_x", "m_y" and "c_y". These are the
+ slope (m) and y-intercept (c) corresponding to
+ y = mx + c, which describes the linear conversion
+ of the coordinates. A (different) linear relationship
+        is required for x (column) and y (row) coordinates,
+ hence the two sets of m and c parameters.
+ The shape of each array of conversion factors
+ typically corresponds to the navigation shape
+ of an EBSDDetector.
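+
+        Examples
+        --------
+        A brief sketch using an illustrative detector and PC:
+
+        >>> import kikuchipy as kp
+        >>> det = kp.detectors.EBSDDetector(shape=(60, 60), pc=(0.4, 0.2, 0.5))
+        >>> factors = det.coordinate_conversion_factors
+        >>> sorted(factors["pix_to_gn"].keys())
+        ['c_x', 'c_y', 'm_x', 'm_y']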
+ """
+ return get_coordinate_conversions(self.gnomonic_bounds, self.bounds)
+
+ @property
+ def sample_to_detector(self) -> Rotation:
+ """Return the orientation matrix which transforms
+ vectors in the sample reference frame, CSs, to the
+ detector reference frame, CSd.
+
+ Notes
+ -----
+
+ This is the matrix U_s as defined in the paper by
+ Britton et al. :cite:`britton2016tutorial`.
+
+ The return value has type orix.quaternion.Rotation.
+ To obtain a np.ndarray from this, call
+ u_s_rot.to_matrix(), or u_s_rot.to_matrix().squeeze(),
+ to obtain a 2D, 3x3 array.
+
+ The matrix describing the reverse transformation, i.e.
+ from the detector reference frame, CSd, to the sample
+ reference frame, CSs, can be obtained like this:
+ ~EBSDDetector.sample_to_detector
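+
+        Examples
+        --------
+        A minimal sketch with illustrative tilt angles:
+
+        >>> import kikuchipy as kp
+        >>> det = kp.detectors.EBSDDetector(sample_tilt=70, tilt=8)
+        >>> u_s = det.sample_to_detector.to_matrix().squeeze()
+        >>> u_s.shape
+        (3, 3)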
+ """
+ u_sample = Rotation.from_euler([0, self.sample_tilt, 0], degrees=True)
+ u_d = Rotation.from_euler(self.euler, degrees=True)
+ u_d_g = u_d.to_matrix().squeeze()
+ u_detector = Rotation.from_matrix(u_d_g.T)
+ u_s_bruker = u_sample * u_detector
+ sample_to_detector = (
+ Rotation.from_axes_angles((0, 0, -1), -np.pi / 2) * u_s_bruker
+ )
+ return sample_to_detector
+
@classmethod
def load(cls, fname: Path | str) -> EBSDDetector:
"""Return an EBSD detector loaded from a text file saved with
@@ -518,6 +589,7 @@ def load(cls, fname: Path | str) -> EBSDDetector:
"binning",
"tilt",
"azimuthal",
+ "twist",
"sample_tilt",
"convention",
"navigation_shape",
@@ -545,7 +617,7 @@ def load(cls, fname: Path | str) -> EBSDDetector:
detector_kw[k] = tuple(int(i) for i in shape[1:-1].split(","))
except ValueError: # pragma: no cover
detector_kw[k] = None
- for k in ["px_size", "binning", "tilt", "azimuthal", "sample_tilt"]:
+ for k in ["px_size", "binning", "tilt", "azimuthal", "twist", "sample_tilt"]:
value = detector_kw[k].rstrip(" deg")
try:
detector_kw[k] = float(value)
@@ -556,7 +628,101 @@ def load(cls, fname: Path | str) -> EBSDDetector:
if isinstance(nav_shape, tuple):
pc = pc.reshape(nav_shape + (3,))
- return cls(pc=pc, **detector_kw)
+ det = cls(pc=pc, **detector_kw)
+
+ return det
+
+ def convert_detector_coordinates(
+ self,
+ coords: np.ndarray,
+ direction: str,
+ detector_index: Union[None, int, tuple] = None,
+ ) -> np.ndarray:
+ """Convert between gnomonic and pixel coordinates on the detector screen.
+
+ Parameters
+ ----------
+ coords
+            An array of coordinates of any shape, with the x and
+            y coordinates to be converted stored in the last axis.
+ direction
+ Either "pix_to_gn" or "gn_to_pix", depending on the
+ direction of conversion needed.
+ detector_index
+ Index showing which conversion factors in *conversions[direction]*
+ should be applied to *coords*.
+ If None, **all** conversion factors in *conversions[direction]*
+ are applied to *coords*.
+ If an int is supplied, this refers to an index in a 1D dataset.
+ A 1D tuple *e.g.* (3,) can also be passed for a 1D dataset.
+ A 2D index can be specified by supplying a tuple *e.g.* (2, 3).
+            The default value is None.
+
+ Returns
+ -------
+ coords_out
+ Array of coords but with values converted as specified
+ by direction. The shape is either the same as the input
+ or is the navigation shape then the shape of the input.
+
+ Examples
+ --------
+
+ Convert a single point on the detector in pixel coordinates into
+ gnomonic coordinates for all patterns in the dataset.
+
+ >>> import numpy as np
+ >>> import kikuchipy as kp
+ >>> s = kp.data.nickel_ebsd_small()
+ >>> det = s.detector
+ >>> det.navigation_shape
+ (3, 3)
+ >>> coords = np.array([[36.2, 12.7]])
+ >>> coords.shape
+ (1, 2)
+ >>> coords_out = det.convert_detector_coordinates(coords, "pix_to_gn", None)
+ >>> coords_out.shape
+ (3, 3, 1, 2)
+ >>> coords_out.squeeze()
+ array([[[ 0.36223464, 0.00664684],
+ [ 0.35762801, -0.00304659],
+ [ 0.35361398, -0.00042112]],
+
+ [[ 0.36432453, 0.00973461],
+ [ 0.35219231, 0.00567801],
+ [ 0.34417285, 0.00404584]],
+
+ [[ 0.36296371, 0.00072557],
+ [ 0.34447751, 0.00538137],
+ [ 0.36136688, 0.00180754]]])
+
+ Convert three points on the detector in pixel coordinates into
+ gnomonic coordinates for the pattern at navigation index (1, 2)
+ in the dataset.
+
+ >>> import numpy as np
+ >>> import kikuchipy as kp
+ >>> s = kp.data.nickel_ebsd_small()
+ >>> det = s.detector
+ >>> det.navigation_shape
+ (3, 3)
+ >>> coords = np.array([[36.2, 12.7], [2.5, 43.7], [8.2, 27.7]])
+ >>> coords.shape
+ (3, 2)
+ >>> coords_out = det.convert_detector_coordinates(coords, "pix_to_gn", (1, 2))
+ >>> coords_out.shape
+ (3, 2)
+ >>> coords_out
+ array([[ 0.34417285, 0.00404584],
+ [-0.77639565, -1.02674418],
+ [-0.58686329, -0.49472353]])
+ """
+ coords_out = convert_coordinates(
+ coords, direction, self.coordinate_conversion_factors, detector_index
+ )
+ return coords_out
def crop(self, extent: tuple[int, int, int, int] | list[int]) -> EBSDDetector:
"""Return a new detector with its :attr:`shape` cropped and
@@ -577,9 +743,9 @@ def crop(self, extent: tuple[int, int, int, int] | list[int]) -> EBSDDetector:
>>> import kikuchipy as kp
>>> det = kp.detectors.EBSDDetector((6, 6), pc=[3 / 6, 2 / 6, 0.5])
>>> det
- EBSDDetector(shape=(6, 6), pc=(0.5, 0.333, 0.5), sample_tilt=70.0, tilt=0.0, azimuthal=0.0, binning=1.0, px_size=1.0 um)
+ EBSDDetector(shape=(6, 6), pc=(0.5, 0.333, 0.5), sample_tilt=70.0, tilt=0.0, azimuthal=0.0, twist=0.0, binning=1.0, px_size=1.0 um)
>>> det.crop((1, 5, 2, 6))
- EBSDDetector(shape=(4, 4), pc=(0.25, 0.25, 0.75), sample_tilt=70.0, tilt=0.0, azimuthal=0.0, binning=1.0, px_size=1.0 um)
+ EBSDDetector(shape=(4, 4), pc=(0.25, 0.25, 0.75), sample_tilt=70.0, tilt=0.0, azimuthal=0.0, twist=0.0, binning=1.0, px_size=1.0 um)
Plot a cropped detector with the PC on a cropped pattern
@@ -1515,12 +1681,12 @@ def plot_pc(
... shape=(480, 640), pc=(0.4, 0.3, 0.5), px_size=70, sample_tilt=70
... )
>>> det0
- EBSDDetector(shape=(480, 640), pc=(0.4, 0.3, 0.5), sample_tilt=70.0, tilt=0.0, azimuthal=0.0, binning=1.0, px_size=70.0 um)
+ EBSDDetector(shape=(480, 640), pc=(0.4, 0.3, 0.5), sample_tilt=70.0, tilt=0.0, azimuthal=0.0, twist=0.0, binning=1.0, px_size=70.0 um)
>>> det = det0.extrapolate_pc(
... pc_indices=[0, 0], navigation_shape=(5, 10), step_sizes=(20, 20)
... )
>>> det
- EBSDDetector(shape=(480, 640), pc=(0.398, 0.299, 0.5), sample_tilt=70.0, tilt=0.0, azimuthal=0.0, binning=1.0, px_size=70.0 um)
+ EBSDDetector(shape=(480, 640), pc=(0.398, 0.299, 0.5), sample_tilt=70.0, tilt=0.0, azimuthal=0.0, twist=0.0, binning=1.0, px_size=70.0 um)
Plot PC values in maps
@@ -1658,6 +1824,7 @@ def save(self, filename: str | Path, convention: str = "Bruker", **kwargs) -> No
f" binning: {self.binning}\n"
f" tilt: {self.tilt} deg\n"
f" azimuthal: {self.azimuthal} deg\n"
+ f" twist: {self.twist} deg\n"
f" sample_tilt: {self.sample_tilt} deg\n"
f" convention: {convention}\n"
f" navigation_shape: {self.navigation_shape}\n\n"
diff --git a/src/kikuchipy/indexing/_refinement/_objective_functions.py b/src/kikuchipy/indexing/_refinement/_objective_functions.py
index 8b94abd5..adf29271 100644
--- a/src/kikuchipy/indexing/_refinement/_objective_functions.py
+++ b/src/kikuchipy/indexing/_refinement/_objective_functions.py
@@ -22,6 +22,7 @@
import numpy as np
+from kikuchipy._utils._gnonomic_bounds import get_gnomonic_bounds
from kikuchipy._utils.numba import rotation_from_euler
from kikuchipy.indexing.similarity_metrics._normalized_cross_correlation import (
_ncc_single_patterns_1d_float32_exp_centered,
@@ -94,27 +95,25 @@ def _refine_pc_objective_function(x: np.ndarray, *args) -> float:
7. 1D signal mask
8. Number of detector rows
9. Number of detector columns
- 10. Detector tilt
- 11. Detector azimuthal angle
- 12. Sample tilt
- 13. Squared norm of centered experimental pattern as 32-bit
+ 10. Orientation matrix detector to sample coordinates
+ 11. Squared norm of centered experimental pattern as 32-bit
float
Returns
-------
Normalized cross-correlation score.
"""
+ gn_bds = get_gnomonic_bounds(args[8], args[9], x[0], x[1], x[2])
+
dc = _get_direction_cosines_for_fixed_pc(
- pcx=x[0],
- pcy=x[1],
+ gnomonic_bounds=gn_bds,
pcz=x[2],
nrows=args[8],
ncols=args[9],
- tilt=args[10],
- azimuthal=args[11],
- sample_tilt=args[12],
+ om_detector_to_sample=args[10],
signal_mask=args[7],
)
+
simulated = _project_single_pattern_from_master_pattern(
rotation=args[1],
direction_cosines=dc,
@@ -129,7 +128,7 @@ def _refine_pc_objective_function(x: np.ndarray, *args) -> float:
dtype_out=np.float32,
)
return 1 - _ncc_single_patterns_1d_float32_exp_centered(
- args[0], simulated, args[13]
+ args[0], simulated, args[11]
)
@@ -152,29 +151,27 @@ def _refine_orientation_pc_objective_function(x: np.ndarray, *args) -> float:
4. Number of master pattern rows
5. Master pattern scale
6. 1D signal mask
- 7. Number of pattern rows
- 8. Number of pattern columns
- 9. Detector tilt
- 10. Detector azimuthal angle
- 11. Sample tilt
- 12. Squared norm of centered experimental pattern as 32-bit
+ 7. Number of detector rows
+ 8. Number of detector columns
+ 9. Orientation matrix detector to sample coordinates
+ 10. Squared norm of centered experimental pattern as 32-bit
float
Returns
-------
normalized cross-correlation score.
"""
+ gn_bds = get_gnomonic_bounds(args[7], args[8], x[3], x[4], x[5])
+
dc = _get_direction_cosines_for_fixed_pc(
- pcx=x[3],
- pcy=x[4],
+ gnomonic_bounds=gn_bds,
pcz=x[5],
nrows=args[7],
ncols=args[8],
- tilt=args[9],
- azimuthal=args[10],
- sample_tilt=args[11],
+ om_detector_to_sample=args[9],
signal_mask=args[6],
)
+
simulated = _project_single_pattern_from_master_pattern(
rotation=rotation_from_euler(*x[:3]),
direction_cosines=dc,
@@ -189,5 +186,5 @@ def _refine_orientation_pc_objective_function(x: np.ndarray, *args) -> float:
dtype_out=np.float32,
)
return 1 - _ncc_single_patterns_1d_float32_exp_centered(
- args[0], simulated, args[12]
+ args[0], simulated, args[10]
)
diff --git a/src/kikuchipy/indexing/_refinement/_refinement.py b/src/kikuchipy/indexing/_refinement/_refinement.py
index d88c0334..0a93ac18 100644
--- a/src/kikuchipy/indexing/_refinement/_refinement.py
+++ b/src/kikuchipy/indexing/_refinement/_refinement.py
@@ -397,9 +397,7 @@ def _refine_orientation(
pcz,
nrows=detector.nrows,
ncols=detector.ncols,
- tilt=detector.tilt,
- azimuthal=detector.azimuthal,
- sample_tilt=detector.sample_tilt,
+ om_detector_to_sample=(~detector.sample_to_detector).to_matrix().squeeze(),
signal_mask=signal_mask,
solver_kwargs=ref.solver_kwargs,
n_pseudo_symmetry_ops=ref.n_pseudo_symmetry_ops,
@@ -451,9 +449,7 @@ def _refine_orientation_chunk_scipy(
direction_cosines: np.ndarray | None = None,
nrows: int | None = None,
ncols: int | None = None,
- tilt: float | None = None,
- azimuthal: float | None = None,
- sample_tilt: float | None = None,
+ om_detector_to_sample: np.ndarray | None = None,
n_pseudo_symmetry_ops: int = 0,
):
"""Refine orientations from patterns in one dask array chunk using
@@ -483,9 +479,7 @@ def _refine_orientation_chunk_scipy(
pcz=float(pcz[i, 0, 0]),
nrows=nrows,
ncols=ncols,
- tilt=tilt,
- azimuthal=azimuthal,
- sample_tilt=sample_tilt,
+ om_detector_to_sample=om_detector_to_sample,
signal_mask=signal_mask,
n_pseudo_symmetry_ops=n_pseudo_symmetry_ops,
**solver_kwargs,
@@ -519,9 +513,7 @@ def _refine_orientation_chunk_nlopt(
direction_cosines: np.ndarray | None = None,
nrows: int | None = None,
ncols: int | None = None,
- tilt: float | None = None,
- azimuthal: float | None = None,
- sample_tilt: float | None = None,
+ om_detector_to_sample: np.ndarray | None = None,
n_pseudo_symmetry_ops: int = 0,
):
"""Refine orientations from patterns in one dask array chunk using
@@ -556,9 +548,7 @@ def _refine_orientation_chunk_nlopt(
pcz=float(pcz[i, 0, 0]),
nrows=nrows,
ncols=ncols,
- tilt=tilt,
- azimuthal=azimuthal,
- sample_tilt=sample_tilt,
+ om_detector_to_sample=om_detector_to_sample,
n_pseudo_symmetry_ops=n_pseudo_symmetry_ops,
**solver_kwargs,
)
@@ -1184,9 +1174,7 @@ def set_fixed_parameters(
signal_mask,
nrows,
ncols,
- detector.tilt,
- detector.azimuthal,
- detector.sample_tilt,
+ (~detector.sample_to_detector).to_matrix().squeeze(),
)
self.fixed_parameters = params
diff --git a/src/kikuchipy/indexing/_refinement/_solvers.py b/src/kikuchipy/indexing/_refinement/_solvers.py
index 9fe346b3..05564f3f 100644
--- a/src/kikuchipy/indexing/_refinement/_solvers.py
+++ b/src/kikuchipy/indexing/_refinement/_solvers.py
@@ -25,6 +25,7 @@
from numba import njit
import numpy as np
+from kikuchipy._utils._gnonomic_bounds import get_gnomonic_bounds
from kikuchipy.indexing._refinement import SUPPORTED_OPTIMIZATION_METHODS
from kikuchipy.indexing._refinement._objective_functions import (
_refine_orientation_objective_function,
@@ -88,9 +89,7 @@ def _refine_orientation_solver_scipy(
pcz: float | None = None,
nrows: int | None = None,
ncols: int | None = None,
- tilt: float | None = None,
- azimuthal: float | None = None,
- sample_tilt: float | None = None,
+ om_detector_to_sample: np.ndarray | None = None,
n_pseudo_symmetry_ops: int = 0,
) -> (
tuple[float, int, float, float, float] | tuple[float, int, int, float, float, float]
@@ -139,15 +138,12 @@ def _refine_orientation_solver_scipy(
ncols
Number of detector columns. Must be passed if
``direction_cosines`` is not given.
- tilt
- Detector tilt from horizontal in degrees. Must be passed if
- ``direction_cosines`` is not given.
- azimuthal
- Sample tilt about the sample RD axis in degrees. Must be passed
+ om_detector_to_sample
+ Orientation matrix used for transforming from the detector
+ reference frame to the sample reference frame. This is the
+ inverse of EBSDDetector.sample_to_detector expressed as a
+ 3x3 numpy array rather than an orix Rotation. Must be passed
if ``direction_cosines`` is not given.
- sample_tilt
- Sample tilt from horizontal in degrees. Must be passed if
- ``direction_cosines`` is not given.
n_pseudo_symmetry_ops
Number of pseudo-symmetry operators. Default is 0.
@@ -163,15 +159,14 @@ def _refine_orientation_solver_scipy(
pattern, squared_norm = _prepare_pattern(pattern, rescale)
if direction_cosines is None:
+ gn_bds = get_gnomonic_bounds(nrows, ncols, pcx, pcy, pcz)
+
direction_cosines = _get_direction_cosines_for_fixed_pc(
- pcx=pcx,
- pcy=pcy,
+ gnomonic_bounds=gn_bds,
pcz=pcz,
nrows=nrows,
ncols=ncols,
- tilt=tilt,
- azimuthal=azimuthal,
- sample_tilt=sample_tilt,
+ om_detector_to_sample=om_detector_to_sample,
signal_mask=signal_mask,
)
@@ -478,9 +473,7 @@ def _refine_orientation_solver_nlopt(
pcz: float | None = None,
nrows: int | None = None,
ncols: int | None = None,
- tilt: float | None = None,
- azimuthal: float | None = None,
- sample_tilt: float | None = None,
+ om_detector_to_sample: np.ndarray | None = None,
n_pseudo_symmetry_ops: int = 0,
) -> (
tuple[float, int, float, float, float] | tuple[float, int, int, float, float, float]
@@ -489,15 +482,14 @@ def _refine_orientation_solver_nlopt(
# Get direction cosines if a unique PC per pattern is used
if direction_cosines is None:
+ gn_bds = get_gnomonic_bounds(nrows, ncols, pcx, pcy, pcz)
+
direction_cosines = _get_direction_cosines_for_fixed_pc(
- pcx=pcx,
- pcy=pcy,
+ gnomonic_bounds=gn_bds,
pcz=pcz,
nrows=nrows,
ncols=ncols,
- tilt=tilt,
- azimuthal=azimuthal,
- sample_tilt=sample_tilt,
+ om_detector_to_sample=om_detector_to_sample,
signal_mask=signal_mask,
)
diff --git a/src/kikuchipy/signals/ebsd.py b/src/kikuchipy/signals/ebsd.py
index 406b0a7b..9d1ce903 100644
--- a/src/kikuchipy/signals/ebsd.py
+++ b/src/kikuchipy/signals/ebsd.py
@@ -153,7 +153,7 @@ class EBSD(KikuchipySignal2D):
>>> s
>>> s.detector
- EBSDDetector(shape=(60, 60), pc=(0.425, 0.213, 0.501), sample_tilt=70.0, tilt=0.0, azimuthal=0.0, binning=8.0, px_size=1.0 um)
+ EBSDDetector(shape=(60, 60), pc=(0.425, 0.213, 0.501), sample_tilt=70.0, tilt=0.0, azimuthal=0.0, twist=0.0, binning=8.0, px_size=1.0 um)
>>> s.static_background
array([[84, 87, 90, ..., 27, 29, 30],
[87, 90, 93, ..., 27, 28, 30],
diff --git a/src/kikuchipy/signals/util/_master_pattern.py b/src/kikuchipy/signals/util/_master_pattern.py
index 36d641ef..c975bc9e 100644
--- a/src/kikuchipy/signals/util/_master_pattern.py
+++ b/src/kikuchipy/signals/util/_master_pattern.py
@@ -99,9 +99,11 @@ def _get_direction_cosines_from_detector(
"""
if detector.navigation_shape == (1,):
pcx, pcy, pcz = detector.pc.squeeze().astype(np.float64)
+ gnomonic_bounds = detector.gnomonic_bounds.squeeze().astype(np.float64)
func = _get_direction_cosines_for_fixed_pc
else:
pcx, pcy, pcz = detector.pc_flattened.T.astype(np.float64)
+ gnomonic_bounds = detector.gnomonic_bounds.reshape((-1, 4)).astype(np.float64)
func = _get_direction_cosines_for_varying_pc
if signal_mask is None:
@@ -109,14 +111,11 @@ def _get_direction_cosines_from_detector(
signal_mask = np.ones(detector.size, dtype=bool)
dc = func(
- pcx=pcx,
- pcy=pcy,
+ gnomonic_bounds=gnomonic_bounds,
pcz=pcz,
nrows=detector.nrows,
ncols=detector.ncols,
- tilt=detector.tilt,
- azimuthal=detector.azimuthal,
- sample_tilt=detector.sample_tilt,
+ om_detector_to_sample=(~detector.sample_to_detector).to_matrix().squeeze(),
signal_mask=signal_mask,
)
@@ -124,61 +123,39 @@ def _get_direction_cosines_from_detector(
@njit(
- "Tuple((float64, float64, float64, float64))(float64, float64, float64)",
- cache=True,
- nogil=True,
- fastmath=True,
-)
-def _get_cosine_sine_of_alpha_and_azimuthal(
- sample_tilt: float, tilt: float, azimuthal: float
-) -> tuple[float, float, float, float]:
- alpha = (np.pi / 2) - np.deg2rad(sample_tilt) + np.deg2rad(tilt)
- azimuthal = np.deg2rad(azimuthal)
- return np.cos(alpha), np.sin(alpha), np.cos(azimuthal), np.sin(azimuthal)
-
-
-@njit(
- (
- "float64[:, :]"
- "(float64, float64, float64, int64, int64, float64, float64, float64, bool_[:])"
- ),
+ ("float64[:, :]" "(float64[:], float64, int64, int64, float64[:, ::1], bool_[:])"),
cache=True,
nogil=True,
fastmath=True,
)
def _get_direction_cosines_for_fixed_pc(
- pcx: float,
- pcy: float,
+ gnomonic_bounds: np.ndarray,
pcz: float,
nrows: int,
ncols: int,
- tilt: float,
- azimuthal: float,
- sample_tilt: float,
+ om_detector_to_sample: np.ndarray,
signal_mask: np.ndarray,
) -> np.ndarray:
"""Return direction cosines for a single projection center (PC).
- Algorithm adapted from EMsoft, see :cite:`callahan2013dynamical`.
+ Algorithm adapted from that used in :cite:`britton2016tutorial`.
Parameters
----------
- pcx
- PC x coordinate.
- pcy
- PC y coordinate.
+ gnomonic_bounds
+ Bounds of the detector in gnomonic coordinates.
pcz
PC z coordinate.
nrows
Number of detector rows.
ncols
Number of detector columns.
- tilt
- Detector tilt from horizontal in degrees.
- azimuthal
- Sample tilt about the sample RD axis in degrees.
- sample_tilt
- Sample tilt from horizontal in degrees.
+ om_detector_to_sample
+ The orientation matrix which transforms
+ vectors in the detector reference frame, CSd,
+ to the sample reference frame, CSs. This is
+ the inverse rotation of the
+ EBSDDetector.sample_to_detector property.
signal_mask
1D signal mask with ``True`` values for pixels to get direction
cosines for.
@@ -197,35 +174,33 @@ def _get_direction_cosines_for_fixed_pc(
-----
This function is optimized with Numba, so care must be taken with
array shapes and data types.
- """
- nrows_array = np.arange(nrows - 1, -1, -1)
- ncols_array = np.arange(ncols)
- # Bruker to EMsoft's v5 PC convention
- xpc = ncols * (0.5 - pcx)
- ypc = nrows * (0.5 - pcy)
- zpc = nrows * pcz
+ A previous version of this algorithm was adapted from EMsoft,
+ see :cite:`callahan2013dynamical`.
+ """
+ x_scale = (gnomonic_bounds[1] - gnomonic_bounds[0]) / ncols
+ y_scale = (gnomonic_bounds[3] - gnomonic_bounds[2]) / nrows
- det_x = xpc + (1 - ncols) * 0.5 + ncols_array
- det_y = ypc - (1 - nrows) * 0.5 - nrows_array
+ det_gn_x = np.arange(gnomonic_bounds[0], gnomonic_bounds[1], x_scale)
- ca, sa, cw, sw = _get_cosine_sine_of_alpha_and_azimuthal(
- sample_tilt=sample_tilt,
- tilt=tilt,
- azimuthal=azimuthal,
- )
- Ls = -sw * det_x + zpc * cw
- Lc = cw * det_x + zpc * sw
+ det_gn_y = np.arange(gnomonic_bounds[3], gnomonic_bounds[2], -y_scale)
idx_1d = np.arange(nrows * ncols)[signal_mask]
rows = idx_1d // ncols
cols = np.mod(idx_1d, ncols)
+
n_pixels = idx_1d.size
r_g_array = np.zeros((n_pixels, 3), dtype=np.float64)
+
+ x_half_step = np.true_divide(x_scale, 2)
+ y_half_step = np.true_divide(y_scale, 2)
+
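+    # Each unmasked pixel centre (offset by half a gnomonic step from
+    # the grid edge) is scaled by pcz to give its vector in the
+    # detector frame; the vectors are rotated into the sample frame
+    # after the loop.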
for i in nb.prange(n_pixels):
- r_g_array[i, 0] = det_y[rows[i]] * ca + sa * Ls[cols[i]]
- r_g_array[i, 1] = Lc[cols[i]]
- r_g_array[i, 2] = -sa * det_y[rows[i]] + ca * Ls[cols[i]]
+ r_g_array[i, 0] = (det_gn_x[cols[i]] + x_half_step) * pcz
+ r_g_array[i, 1] = (det_gn_y[rows[i]] - y_half_step) * pcz
+ r_g_array[i, 2] = pcz
+
+ r_g_array = np.dot(r_g_array, om_detector_to_sample)
# Normalize
norm = np.sqrt(np.sum(np.square(r_g_array), axis=-1))
@@ -238,46 +213,42 @@ def _get_direction_cosines_for_fixed_pc(
@njit(
(
"float64[:, :, :]"
- "(float64[:], float64[:], float64[:], int64, int64, float64, float64, float64, bool_[:])"
+ "(float64[:, :], float64[:], int64, int64, float64[:, ::1], bool_[:])"
),
cache=True,
nogil=True,
fastmath=True,
)
def _get_direction_cosines_for_varying_pc(
- pcx: np.ndarray,
- pcy: np.ndarray,
+ gnomonic_bounds: np.ndarray,
pcz: np.ndarray,
nrows: int,
ncols: int,
- tilt: float,
- azimuthal: float,
- sample_tilt: float,
+ om_detector_to_sample: np.ndarray,
signal_mask: np.ndarray,
) -> np.ndarray:
"""Return sets of direction cosines for varying projection centers
(PCs).
- Algorithm adapted from EMsoft, see :cite:`callahan2013dynamical`.
+ Algorithm adapted from that used in :cite:`britton2016tutorial`.
Parameters
----------
- pcx
- PC x coordinates. Must be a 1D array.
- pcy
- PC y coordinates. Must be a 1D array.
+ gnomonic_bounds
+ Bounds of the detector in gnomonic coordinates, one for
+ each PC.
pcz
- PC z coordinates. Must be a 1D array.
+ PC z coordinate for each PC.
nrows
Number of detector rows.
ncols
Number of detector columns.
- tilt
- Detector tilt from horizontal in degrees.
- azimuthal
- Sample tilt about the sample RD axis in degrees.
- sample_tilt
- Sample tilt from horizontal in degrees.
+ om_detector_to_sample
+ The orientation matrix which transforms
+ vectors in the detector reference frame, CSd, to the
+ sample reference frame, CSs. One matrix valid for
+ ALL PCs. This is the inverse rotation of the
+ EBSDDetector.sample_to_detector property.
signal_mask
1D signal mask with ``True`` values for pixels to get direction
cosines for.
@@ -297,42 +268,35 @@ def _get_direction_cosines_for_varying_pc(
This function is optimized with Numba, so care must be taken with
array shapes and data types.
"""
- nrows_array = np.arange(nrows - 1, -1, -1)
- ncols_array = np.arange(ncols)
-
- ca, sa, cw, sw = _get_cosine_sine_of_alpha_and_azimuthal(
- sample_tilt=sample_tilt,
- tilt=tilt,
- azimuthal=azimuthal,
- )
-
- det_x_factor = (1 - ncols) * 0.5
- det_y_factor = (1 - nrows) * 0.5
-
idx_1d = np.arange(nrows * ncols)[signal_mask]
rows = idx_1d // ncols
cols = np.mod(idx_1d, ncols)
- n_pcs = pcx.size
+ n_pcs = pcz.size
n_pixels = idx_1d.size
r_g_array = np.zeros((n_pcs, n_pixels, 3), dtype=np.float64)
for i in nb.prange(n_pcs):
- # Bruker to EMsoft's v5 PC convention
- xpc = ncols * (0.5 - pcx[i])
- ypc = nrows * (0.5 - pcy[i])
- zpc = nrows * pcz[i]
+ x_scale = (gnomonic_bounds[i, 1] - gnomonic_bounds[i, 0]) / ncols
+ y_scale = (gnomonic_bounds[i, 3] - gnomonic_bounds[i, 2]) / nrows
+
+ det_gn_x = np.arange(gnomonic_bounds[i, 0], gnomonic_bounds[i, 1], x_scale)
- det_x = xpc + det_x_factor + ncols_array
- det_y = ypc - det_y_factor - nrows_array
+ det_gn_y = np.arange(gnomonic_bounds[i, 3], gnomonic_bounds[i, 2], -y_scale)
- Ls = -sw * det_x + zpc * cw
- Lc = cw * det_x + zpc * sw
+ x_half_step = np.true_divide(x_scale, 2)
+ y_half_step = np.true_divide(y_scale, 2)
+
+ r_g_a = np.zeros((n_pixels, 3), dtype=np.float64)
for j in nb.prange(n_pixels):
- r_g_array[i, j, 0] = det_y[rows[j]] * ca + sa * Ls[cols[j]]
- r_g_array[i, j, 1] = Lc[cols[j]]
- r_g_array[i, j, 2] = -sa * det_y[rows[j]] + ca * Ls[cols[j]]
+ r_g_a[j, 0] = (det_gn_x[cols[j]] + x_half_step) * pcz[i]
+ r_g_a[j, 1] = (det_gn_y[rows[j]] - y_half_step) * pcz[i]
+ r_g_a[j, 2] = pcz[i]
+
+ r_g_a = np.dot(r_g_a, om_detector_to_sample)
+
+ r_g_array[i] = r_g_a
# Normalize
norm = np.sqrt(np.sum(np.square(r_g_array), axis=-1))
diff --git a/src/kikuchipy/simulations/kikuchi_pattern_simulator.py b/src/kikuchipy/simulations/kikuchi_pattern_simulator.py
index a257752e..979db47f 100644
--- a/src/kikuchipy/simulations/kikuchi_pattern_simulator.py
+++ b/src/kikuchipy/simulations/kikuchi_pattern_simulator.py
@@ -255,8 +255,11 @@ def on_detector(
# Transformation from detector reference frame CSd to sample
# reference frame CSs
- total_tilt = np.deg2rad(detector.sample_tilt - 90 - detector.tilt)
- u_s_bruker = Rotation.from_axes_angles((-1, 0, 0), total_tilt)
+ u_sample = Rotation.from_euler([0, detector.sample_tilt, 0], degrees=True)
+ u_d = Rotation.from_euler(detector.euler, degrees=True)
+ u_d_g = u_d.to_matrix().squeeze()
+ u_detector = Rotation.from_matrix(u_d_g.T)
+ u_s_bruker = u_sample * u_detector
u_s_rot = Rotation.from_axes_angles((0, 0, -1), -np.pi / 2) * u_s_bruker
u_s = da.from_array(u_s_rot.to_matrix().squeeze())
diff --git a/tests/test_detectors/test_ebsd_detector.py b/tests/test_detectors/test_ebsd_detector.py
index a338b627..15b2dbb2 100644
--- a/tests/test_detectors/test_ebsd_detector.py
+++ b/tests/test_detectors/test_ebsd_detector.py
@@ -22,6 +22,7 @@
import matplotlib.pyplot as plt
import numpy as np
from orix.crystal_map import PhaseList
+from orix.quaternion import Rotation
import pytest
import kikuchipy as kp
@@ -112,11 +113,11 @@ def test_detector_dimensions(
def test_repr(self, pc1):
"""Expected string representation."""
det = kp.detectors.EBSDDetector(
- shape=(1, 2), px_size=3, binning=4, tilt=5, azimuthal=2, pc=pc1
+ shape=(1, 2), px_size=3, binning=4, tilt=5, azimuthal=2, twist=1.02, pc=pc1
)
assert repr(det) == (
"EBSDDetector(shape=(1, 2), pc=(0.421, 0.779, 0.505), sample_tilt=70.0, "
- "tilt=5.0, azimuthal=2.0, binning=4.0, px_size=3.0 um)"
+ "tilt=5.0, azimuthal=2.0, twist=1.02, binning=4.0, px_size=3.0 um)"
)
def test_deepcopy(self, pc1):
@@ -212,6 +213,132 @@ def test_gnomonic_scale(self, pc1, shape, desired_x_scale, desired_y_scale):
assert np.allclose(detector.x_scale, desired_x_scale, atol=1e-6)
assert np.allclose(detector.y_scale, desired_y_scale, atol=1e-6)
+ @pytest.mark.parametrize(
+ "tilt, azimuthal, twist, sample_tilt, expected_angle, expected_axis, expected_rotation",
+ [
+ (
+ 0,
+ 0,
+ 0,
+ 90,
+ 90.0,
+ np.array([0.0, 0.0, 1.0]),
+ np.array([[0.70710678, 0.0, 0.0, 0.70710678]]),
+ ),
+ (
+ 0,
+ 0,
+ 0,
+ 70,
+ 91.7279410723505,
+ np.array([0.17108787, 0.17108787, 0.97028753]),
+ np.array([[0.69636424, 0.1227878, 0.1227878, 0.69636424]]),
+ ),
+ (
+ 8.3,
+ 4.7,
+ 1.02,
+ 70,
+ 94.94104636971042,
+ np.array([0.19765823, 0.27176174, 0.94184754]),
+ np.array([[0.67596942, 0.14566022, 0.20026929, 0.69407539]]),
+ ),
+ ],
+ )
+ def test_sample_to_detector(
+ self,
+ tilt,
+ azimuthal,
+ twist,
+ sample_tilt,
+ expected_angle,
+ expected_axis,
+ expected_rotation,
+ ):
+ """Check the Rotation sample_to_detector has the correct type and values."""
+ detector = kp.detectors.EBSDDetector(
+ tilt=tilt, azimuthal=azimuthal, twist=twist, sample_tilt=sample_tilt
+ )
+ smpl_to_det = detector.sample_to_detector
+
+ assert isinstance(smpl_to_det, Rotation)
+ assert np.allclose(np.rad2deg(smpl_to_det.angle)[0], expected_angle)
+ assert np.allclose(smpl_to_det.axis.data.squeeze(), expected_axis)
+ assert np.allclose(smpl_to_det.data, expected_rotation)
+
+ def test_coordinate_conversion_factors(self):
+ """Factors for converting between pixel and gonomic coords."""
+ s = kp.data.nickel_ebsd_small()
+ det_1d = kp.detectors.EBSDDetector(shape=(60, 60), pc=s.detector.pc[0,])
+ conv_1d = det_1d.coordinate_conversion_factors
+
+ exp_res_1d = {
+ "pix_to_gn": {
+ "m_x": np.array([0.03319923, 0.03326385, 0.03330547]),
+ "c_x": np.array([-0.83957734, -0.84652344, -0.85204404]),
+ "m_y": np.array([-0.03319923, -0.03326385, -0.03330547]),
+ "c_y": np.array([0.42827701, 0.41940433, 0.42255835]),
+ },
+ "gn_to_pix": {
+ "m_x": np.array([30.12118421, 30.06266362, 30.02509794]),
+ "c_x": np.array([25.28906376, 25.4487495, 25.58270568]),
+ "m_y": np.array([-30.12118421, -30.06266362, -30.02509794]),
+ "c_y": np.array([12.90021062, 12.60841133, 12.6873559]),
+ },
+ }
+
+ for i in ["pix_to_gn", "gn_to_pix"]:
+ for j in ["m_x", "c_x", "m_y", "c_y"]:
+ assert np.allclose(conv_1d[i][j], exp_res_1d[i][j])
+
+ @pytest.mark.parametrize(
+ "coords, detector_index, desired_coords",
+ [
+ (
+ np.array([[36.2, 12.7]]),
+ None,
+ np.array(
+ [
+ [[[0.36223463, 0.00664684]], [[0.357628, -0.00304659]]],
+ [[[0.36432453, 0.00973462]], [[0.35219232, 0.00567801]]],
+ ]
+ ),
+ ),
+ (
+ np.array([[36.2, 12.7], [2.5, 43.7], [8.2, 27.7]]),
+ (0, 1),
+ np.array(
+ [
+ [0.35762801, -0.00304659],
+ [-0.76336381, -1.03422601],
+ [-0.57375985, -0.50200438],
+ ]
+ ),
+ ),
+ ],
+ )
+ def test_coordinate_conversions(self, coords, detector_index, desired_coords):
+ """Converting from pixel to gnomonic coords and back."""
+ pc = np.array(
+ [
+ [
+ [0.4214844, 0.21500351, 0.50201974],
+ [0.42414583, 0.21014019, 0.50104439],
+ ],
+ [
+ [0.42088203, 0.2165417, 0.50079336],
+ [0.42725023, 0.21450546, 0.49996293],
+ ],
+ ]
+ )
+ det = kp.detectors.EBSDDetector(shape=(60, 60), pc=pc)
+ cds_out = det.convert_detector_coordinates(coords, "pix_to_gn", detector_index)
+ cds_back = det.convert_detector_coordinates(
+ cds_out, "gn_to_pix", detector_index
+ )
+ assert np.allclose(cds_out, desired_coords)
+ assert np.allclose(cds_back[..., :], coords[..., :])
+
@pytest.mark.parametrize(
"shape, pc, px_size, binning, version, desired_pc",
[
@@ -1084,12 +1211,12 @@ def test_get_indexer(self):
class TestSaveLoadDetector:
@pytest.mark.parametrize(
- "nav_shape, shape, convention, sample_tilt, tilt, px_size, binning, azimuthal",
+ "nav_shape, shape, convention, sample_tilt, tilt, px_size, binning, azimuthal, twist",
[
- ((3, 4), (10, 20), "bruker", 70, 0, 70, 1, 0),
- ((1, 5), (5, 5), "tsl", 69.5, 3.14, 57.2, 2, 3.7),
- ((4, 3), (6, 7), "emsoft", -69.5, -3.14, 57.2, 2, -3.7),
- ((3, 2), (5, 7), "oxford", 71.3, 1.2, 90.3, 3, 0.1),
+ ((3, 4), (10, 20), "bruker", 70, 0, 70, 1, 0, 0),
+ ((1, 5), (5, 5), "tsl", 69.5, 3.14, 57.2, 2, 3.7, 0.003),
+ ((4, 3), (6, 7), "emsoft", -69.5, -3.14, 57.2, 2, -3.7, -1.23),
+ ((3, 2), (5, 7), "oxford", 71.3, 1.2, 90.3, 3, 0.1, 0.0465),
],
)
def test_save_load_detector(
@@ -1103,6 +1230,7 @@ def test_save_load_detector(
px_size,
binning,
azimuthal,
+ twist,
):
det0 = kp.detectors.EBSDDetector(
shape=shape,
@@ -1112,6 +1240,7 @@ def test_save_load_detector(
px_size=px_size,
binning=binning,
azimuthal=azimuthal,
+ twist=twist,
convention=convention,
)
det1 = det0.extrapolate_pc(
@@ -1133,6 +1262,7 @@ def test_save_load_detector(
assert np.isclose(det2.px_size, det1.px_size)
assert det2.binning == det1.binning
assert np.isclose(det2.azimuthal, det1.azimuthal)
+ assert np.isclose(det2.twist, det1.twist)
def test_save_detector_raises(self, tmp_path):
det = kp.detectors.EBSDDetector()
diff --git a/tests/test_indexing/test_ebsd_refinement.py b/tests/test_indexing/test_ebsd_refinement.py
index 68a6f1b7..a1a31bb4 100644
--- a/tests/test_indexing/test_ebsd_refinement.py
+++ b/tests/test_indexing/test_ebsd_refinement.py
@@ -1220,6 +1220,7 @@ def test_refine_orientation_pc_pseudo_symmetry_scipy(self):
# Global: Differential evolution
_, _ = s.refine_orientation_projection_center(
method="differential_evolution",
+ trust_region=[2, 2, 2, 0.05, 0.05, 0.05],
navigation_mask=nav_mask,
**ref_kw,
)
diff --git a/tests/test_io/test_nordif.py b/tests/test_io/test_nordif.py
index 1d101d17..4ce967bc 100644
--- a/tests/test_io/test_nordif.py
+++ b/tests/test_io/test_nordif.py
@@ -294,7 +294,7 @@ def test_load_to_memory(self, nordif_path):
assert isinstance(s.data, np.ndarray)
assert not isinstance(s.data, np.memmap)
- def test_load_readonly(self, nordif_path):
+ def test_load_memmap(self, nordif_path):
s = kp.load(nordif_path / "Pattern.dat", lazy=True)
keys = ["array-original", "original-array"]
k = next(
@@ -305,7 +305,6 @@ def test_load_readonly(self, nordif_path):
)
mm = s.data.dask[k]
assert isinstance(mm, np.memmap)
- assert not mm.flags["WRITEABLE"]
@pytest.mark.parametrize("lazy", [True, False])
def test_load_inplace(self, nordif_path, assert_dictionary_func, lazy):
diff --git a/tests/test_signals/test_ebsd_master_pattern.py b/tests/test_signals/test_ebsd_master_pattern.py
index bf7fab77..53e6605a 100644
--- a/tests/test_signals/test_ebsd_master_pattern.py
+++ b/tests/test_signals/test_ebsd_master_pattern.py
@@ -16,6 +16,7 @@
# along with kikuchipy. If not, see <https://www.gnu.org/licenses/>.
import dask.array as da
+from diffsims.crystallography import ReciprocalLatticeVector
from hyperspy._signals.signal2d import Signal2D
import hyperspy.api as hs
import numpy as np
@@ -24,8 +25,12 @@
import pytest
import kikuchipy as kp
+from kikuchipy._utils._detector_coordinates import (
+ convert_coordinates,
+ get_coordinate_conversions,
+)
+from kikuchipy._utils.numba import rotate_vector
from kikuchipy.signals.util._master_pattern import (
- _get_cosine_sine_of_alpha_and_azimuthal,
_get_direction_cosines_for_fixed_pc,
_get_direction_cosines_for_varying_pc,
_get_direction_cosines_from_detector,
@@ -190,23 +195,31 @@ class TestProjectFromLambert:
)
def test_get_direction_cosines(self):
+ """Make sure the Numba function is covered."""
det = self.detector
dc = _get_direction_cosines_from_detector(det)
assert dc.shape == (det.size, 3)
assert np.max(dc) <= 1
dc2 = _get_direction_cosines_for_fixed_pc.py_func(
- pcx=det.pcx,
- pcy=det.pcy,
- pcz=det.pcz,
+ gnomonic_bounds=det.gnomonic_bounds.squeeze().astype(np.float64),
+ pcz=det.pc.squeeze().astype(np.float64)[2],
+ nrows=det.nrows,
+ ncols=det.ncols,
+ om_detector_to_sample=(~det.sample_to_detector).to_matrix().squeeze(),
+ signal_mask=np.ones(det.size, dtype=bool),
+ )
+
+ dc3 = _get_direction_cosines_for_fixed_pc(
+ gnomonic_bounds=det.gnomonic_bounds.squeeze().astype(np.float64),
+ pcz=det.pc.squeeze().astype(np.float64)[2],
nrows=det.nrows,
ncols=det.ncols,
- tilt=det.tilt,
- azimuthal=det.azimuthal,
- sample_tilt=det.sample_tilt,
+ om_detector_to_sample=(~det.sample_to_detector).to_matrix().squeeze(),
signal_mask=np.ones(det.size, dtype=bool),
)
assert np.allclose(dc, dc2)
+ assert np.allclose(dc2, dc3)
def test_get_patterns(self, emsoft_ebsd_file):
emsoft_key = kp.load(emsoft_ebsd_file)
@@ -384,8 +397,8 @@ def test_detector_azimuthal(self):
sim3 = mp.get_patterns(detector=det3, **kwargs)
assert not np.allclose(sim1.data, sim2.data)
- assert np.allclose(sim2.data.mean(), 43.56, atol=1e-2)
- assert np.allclose(sim3.data.mean(), 43.39, atol=1e-2)
+ assert np.allclose(sim2.data.mean(), 43.51, atol=1e-2)
+ assert np.allclose(sim3.data.mean(), 43.30, atol=1e-2)
def test_project_patterns_from_master_pattern(self):
"""Cover the Numba functions."""
@@ -492,34 +505,41 @@ def test_get_pixel_from_master_pattern(self):
)
assert np.isclose(value, 1)
- def test_get_cosine_sine_of_alpha_and_azimuthal(self):
- """Make sure the Numba function is covered."""
- values = _get_cosine_sine_of_alpha_and_azimuthal.py_func(
- sample_tilt=70, tilt=10, azimuthal=5
- )
- assert np.allclose(values, [0.866, 0.5, 0.996, 0.087], atol=1e-3)
-
def test_get_direction_cosines_for_multiple_pcs(self):
"""Make sure the Numba function is covered."""
det = self.detector
dc0 = _get_direction_cosines_from_detector(det)
+
nav_shape = (2, 3)
det.pc = np.full(nav_shape + (3,), det.pc)
nrows, ncols = det.shape
- dc = _get_direction_cosines_for_varying_pc.py_func(
- pcx=det.pcx.ravel(),
- pcy=det.pcy.ravel(),
- pcz=det.pcz.ravel(),
- nrows=nrows,
- ncols=ncols,
- tilt=det.tilt,
- azimuthal=det.azimuthal,
- sample_tilt=det.sample_tilt,
+
+ gnomonic_bounds = det.gnomonic_bounds.reshape(
+ (np.prod(det.navigation_shape), 4)
+ ).astype(np.float64)
+ pcz = det.pc_flattened.T.astype(np.float64)[2]
+
+ dc1 = _get_direction_cosines_for_varying_pc.py_func(
+ gnomonic_bounds=gnomonic_bounds,
+ pcz=pcz,
+ nrows=det.nrows,
+ ncols=det.ncols,
+ om_detector_to_sample=(~det.sample_to_detector).to_matrix().squeeze(),
+ signal_mask=np.ones(det.size, dtype=bool),
+ )
+
+ dc2 = _get_direction_cosines_for_varying_pc(
+ gnomonic_bounds=gnomonic_bounds,
+ pcz=pcz,
+ nrows=det.nrows,
+ ncols=det.ncols,
+ om_detector_to_sample=(~det.sample_to_detector).to_matrix().squeeze(),
signal_mask=np.ones(det.size, dtype=bool),
)
- assert np.allclose(dc0, dc[0])
- assert dc.shape == (np.prod(nav_shape), nrows * ncols, 3)
+ assert np.allclose(dc0, dc1[0])
+ assert np.allclose(dc1[0], dc2[0])
+ assert dc1.shape == (np.prod(nav_shape), nrows * ncols, 3)
class TestMasterPatternPlotting:
@@ -755,3 +775,388 @@ def test_vector2xy(self):
]
assert np.allclose(_vector2lambert.py_func(xyz), lambert_xy)
assert np.allclose(_vector2lambert(xyz), lambert_xy)
+
+
+class TestFitPatternDetectorOrientation:
+ """
+ Test the fit between an EBSD pattern generated
+ from a master pattern and an associated
+ GeometricalKikuchiPatternSimulation for different
+ detector orientations.
+ """
+
+ detector = kp.detectors.EBSDDetector(
+ shape=(480, 640), px_size=50, pc=(20, 20, 15000), convention="emsoft4"
+ )
+
+ phase = kp.data.nickel_ebsd_master_pattern_small().phase
+
+ hkl = [(1, 1, 1), (2, 0, 0), (2, 2, 0), (3, 1, 1)]
+
+ ref = ReciprocalLatticeVector(phase=phase, hkl=hkl)
+ ref = ref.symmetrise()
+
+ simulator = kp.simulations.KikuchiPatternSimulator(ref)
+
+ rotations = Rotation.from_euler([23, 14, 5], degrees=True)
+
+ # Transformation from CSs to cartesian crystal reference frame CSc
+ u_o = rotations.to_matrix().squeeze()
+
+ # Transformation from CSc to direct crystal reference frame CSk
+ u_a = phase.structure.lattice.base
+
+ def setup_detector_sim_and_u_os(self, tilt, azimuthal, twist):
+ det = self.detector.deepcopy()
+ det.tilt = tilt
+ det.azimuthal = azimuthal
+ det.twist = twist
+
+ sim_lines = self.simulator.on_detector(det, self.rotations)
+
+ u_os = np.matmul(self.u_o, det.sample_to_detector.to_matrix().squeeze())
+
+ return det, sim_lines, u_os
+
+ def setup_za_and_cds(self, zone_axes, sim_lines, det):
+ """Find the indices of the zone_axes in the
+ GeometricalKikuchiPatternSimulation (sim_lines).
+ Find the gnomonic coordinates of these zone axes.
+        Then convert these into pixel coordinates
+ on the detector and round to the nearest pixel.
+ Return the indices of the zone axes, the
+ conversion dict from pix to gn and back, and
+ the coordinates of the nearest detector pixels
+ to the three zone axes.
+ """
+ za_idx = [
+ index_row_in_array(sim_lines._zone_axes.vector.uvw, i) for i in zone_axes
+ ]
+
+ x_gn = sim_lines._zone_axes.x_gnomonic[0, za_idx]
+ y_gn = sim_lines._zone_axes.y_gnomonic[0, za_idx]
+ cds_gn = np.stack((x_gn, y_gn), axis=1)
+
+ conversions = get_coordinate_conversions(det.gnomonic_bounds, det.bounds)
+ cds_px = convert_coordinates(cds_gn, "gn_to_pix", conversions)
+
+ cds_px_int = np.squeeze(np.around(cds_px, decimals=0).astype(int))
+
+ return za_idx, conversions, cds_px_int
+
+ def get_a_angles(self, sim_lines, za_idx, u_os):
+ """Check that the GeometricalKikuchiPatternSimulation
+ is self-consistent. We do this by taking the vectors
+ in the detector reference frame of 3 zone axes in
+ the simulation, transforming these vectors from CSd
+ to the cartesian crystal reference frame, CSc, and
+ measuring the angle between these and vectors
+ transformed from the uvw indices of the zone axes
+        into the cartesian crystal reference frame,
+        i.e. from CSk to CSc. These angles (a_ang) should all
+        be zero if the GeometricalKikuchiPatternSimulation is
+        self-consistent (this is tested later). Return a_ang
+        and the normalised zone-axis vectors in CSc (d_vec_n).
+ """
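+        # Angles below are obtained as np.rad2deg(np.arccos(dot)) on unit
+        # vectors; the dot product is rounded to 8 decimals so that tiny
+        # floating-point excursions outside [-1, 1] do not make arccos
+        # return NaN.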
+ # CSk to CSc:
+ uvw = sim_lines._zone_axes.vector.uvw[za_idx]
+ d_vec = np.matmul(uvw, self.u_a)
+ N1 = np.sqrt(np.sum(np.square(d_vec), axis=1))
+ d_vec_n = d_vec / np.expand_dims(N1, 1)
+
+ # CSd to CSc:
+ za_vec = sim_lines._zone_axes.vector_detector[0, za_idx].data
+ za_vec_trans = np.matmul(za_vec, np.linalg.inv(u_os)).squeeze()
+ N2 = np.sqrt(np.sum(np.square(za_vec_trans), axis=1))
+ za_vec_trans_n = za_vec_trans / np.expand_dims(N2, 1)
+
+ # angles between the two sets of vectors:
+ a_ang = np.array(
+ [
+ np.rad2deg(
+ np.arccos(
+ np.around(
+ np.dot(za_vec_trans_n[i, :], d_vec_n[i, :]), decimals=8
+ )
+ )
+ )
+ for i in range(3)
+ ]
+ )
+
+ return a_ang, d_vec_n
+
+ def get_d_ang(self, cds_px_int, conversions, u_os, d_vec_n, det):
+ """Find the gnomonic coordinates of the nearest
+ pixel centre to each of the 3 zone axes. Turn them
+ into vectors and transform them from CSd to CSc using
+ the same transformation as the
+ GeometricalKikuchiPatternSimulation. Then
+        calculate the angles (d_ang) between these vectors
+        and the vectors representing the zone axes in CSc,
+        as calculated from CSk to CSc (d_vec_n). Return d_ang
+        and the transformed pixel-centre vectors in CSc (dddd).
+ """
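+        # A gnomonic coordinate (x_gn, y_gn) at pattern-centre distance pcz
+        # corresponds to the detector-frame vector
+        # (x_gn * pcz, y_gn * pcz, pcz); the hstack construction below
+        # builds exactly this before normalising.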
+ # Here we add 0.5 because pixel centres are used by the direction
+ # cosines method and we need to take the same coords for this
+ # alternative approach.
+ cds_gn_int = np.squeeze(
+ convert_coordinates(cds_px_int + 0.5, "pix_to_gn", conversions)
+ )
+
+ vecs = np.hstack(
+ (
+ cds_gn_int * det.pcz,
+ np.repeat(np.atleast_2d(det.pcz), cds_gn_int.shape[0], axis=0),
+ )
+ )
+ N3 = np.sqrt(np.sum(np.square(vecs), axis=1))
+ vecs_n = vecs / np.expand_dims(N3, 1)
+
+ # CSd to CSc:
+ dddd = np.matmul(vecs_n, np.linalg.inv(u_os)).squeeze()
+
+ d_ang = np.array(
+ [
+ np.rad2deg(
+ np.arccos(np.around(np.dot(dddd[i, :], d_vec_n[i, :]), decimals=8))
+ )
+ for i in range(3)
+ ]
+ )
+
+ return d_ang, dddd
+
+ def get_r_ang_and_n_ang(self, cds_px_int, det, d_vec_n, dddd):
+ """Calculate the direction cosines of all the
+ detector pixels then rotate them to account
+ for the crystal orientation. The resulting
+ vectors are in CSc. Select the vectors
+ corresponding to the pixel centres representing the 3
+ zone axes. Then calculate the angles (r_ang) between
+ these vectors and the vectors representing the
+ zone axes in CSc but calculated from CSk to CSc
+        (d_vec_n). Finally calculate the angles (n_ang) between
+        the two sets of vectors representing the centres of the
+        nearest pixels to the zone axes (one set is from the
+        direction cosines of the detector, the other is from the
+        GeometricalKikuchiPatternSimulation). The n_ang angles
+        should all be zero if the transformations used for the
+        direction cosines and for the
+        GeometricalKikuchiPatternSimulation are the same (this
+        is tested later). Return r_ang and n_ang.
+ """
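+        # _get_direction_cosines_from_detector gives one unit vector per
+        # detector pixel; rotate_vector applies the crystal orientation so
+        # the selected pixel-centre vectors can be compared with d_vec_n and
+        # dddd, which are already in CSc.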
+ # swap columns for i,j array indexing:
+ cds_px_int_ij = cds_px_int[:, ::-1]
+
+ # all detector pixels:
+ r_g_array = _get_direction_cosines_from_detector(det)
+ r_g_array_rot = rotate_vector(self.rotations.data[0], r_g_array)
+ rgarrrot_reshaped = r_g_array_rot.reshape((*self.detector.shape, 3))
+
+ # select vectors corresponding to the nearest pixels to the chosen zone axes
+ rgrar_vec = rgarrrot_reshaped[cds_px_int_ij[:, 0], cds_px_int_ij[:, 1]]
+
+ r_ang = np.array(
+ [
+ np.rad2deg(
+ np.arccos(
+ np.around(np.dot(d_vec_n[i, :], rgrar_vec[i, :]), decimals=8)
+ )
+ )
+ for i in range(3)
+ ]
+ )
+
+ n_ang = np.array(
+ [
+ np.rad2deg(
+ np.arccos(
+ np.around(np.dot(dddd[i, :], rgrar_vec[i, :]), decimals=8)
+ )
+ )
+ for i in range(3)
+ ]
+ )
+
+ return r_ang, n_ang
+
+ def calculate_fit(self, tilt, azimuthal, twist, zone_axes):
+ """
+        Calculate four sets of angles with which the fit
+        between the EBSD pattern simulated from a master
+        pattern and the GeometricalKikuchiPatternSimulation
+        generated using the same parameters can be evaluated.
+        The method can be called with different values of
+        the detector tilt, azimuthal and twist (euler_2)
+        angles, together with appropriate zone axes (uvw
+        indices) which appear on the pattern under those
+        conditions.
+
+ The approach has several steps:
+
+ 1. Check that the GeometricalKikuchiPatternSimulation
+ is self-consistent. We do this by taking the vectors
+ in the detector reference frame of 3 zone axes in
+ the simulation, transforming these vectors from CSd
+ to the cartesian crystal reference frame, CSc, and
+ measuring the angle between these and vectors
+ transformed from the uvw indices of the zone axes
+        into the cartesian crystal reference frame,
+        i.e. from CSk to CSc. These angles (a_ang) should all
+        be zero if the GeometricalKikuchiPatternSimulation is
+        self-consistent.
+
+        2. Find the gnomonic coordinates of the nearest
+        pixel centre to each of the 3 zone axes. This
+        enables us to check the vectors corresponding to
+        the detector pixels. We do two things with these
+        coordinates:
+ a) turn them into vectors and transform them
+ from CSd to CSc using the same transformation as
+ the GeometricalKikuchiPatternSimulation. Then
+ calculate the angles (d_ang) between these vectors
+ and the vectors representing the zone axes in CSc.
+ b) calculate the direction cosines of all the
+ detector pixels then rotate them to account
+ for the crystal orientation. The resulting
+ vectors are in CSc but calculated using
+ different functions. Select the vectors
+ corresponding to the pixels representing the 3
+ zone axes. Then calculate the angles (r_ang) between
+ these vectors and the vectors representing the
+ zone axes in CSc.
+ These angles should be the same for a) and b),
+ meaning that the angle between the zone axes
+ and the centre of the nearest pixel is the
+ same for both transformation routes.
+
+ 3. Finally calculate the angles (n_ang) between the two
+ sets of vectors representing the centres of the
+ nearest pixels to the zone axes. The angles are
+ zero if the transformations used for the direction
+ cosines and for the GeometricalKikuchiPatternSimulation
+ are the same.
+
+
+ Parameters
+ ----------
+        tilt : float
+            The detector tilt angle in degrees (i.e. detector.tilt).
+            Detector Euler angle PHI (EBSDDetector.euler[1]) == 90 + tilt.
+        azimuthal : float
+            The detector azimuthal angle in degrees (i.e. detector.azimuthal).
+            Detector Euler angle phi1 (EBSDDetector.euler[0]) == azimuthal.
+        twist : float
+            The detector twist angle (EBSDDetector.euler[2]) in degrees.
+        zone_axes : list or np.ndarray
+            List/array containing three lists, each containing a set
+            of uvw indices describing a zone axis on the pattern,
+            e.g. [[0, 0, 1], [1, 1, 0], [1, 2, 3]].
+
+ Returns
+ -------
+ a_ang : np.ndarray
+            The angles in degrees between vectors in CSc calculated
+            1) from uvw indices in CSk and 2) from vectors in CSd.
+ These should all be zero for self-consistency of
+ the GeometricalKikuchiPatternSimulation.
+ d_ang : np.ndarray
+ The angles in degrees, between vectors representing
+ 3 zone axes on the pattern and vectors representing
+ the nearest pixel centres to the same zone axes.
+ Both sets of vectors are transformed into CSc.
+ The transformation for both sets was the one used
+ by the GeometricalKikuchiPatternSimulation.
+ r_ang : np.ndarray
+ The angles in degrees, between vectors representing
+ 3 zone axes on the pattern and vectors representing
+ the nearest pixel centres to the same zone axes but
+ this time, the pixel centre vectors use the
+ transformation of the direction cosines of the
+ detector pixels.
+ n_ang : np.ndarray
+ The angles in degrees between the two sets of vectors
+ representing the centres of the nearest pixels to 3
+ zone axes on the pattern. Both sets of vectors are
+ in CSc. The transformation for one set was the one
+ used by the GeometricalKikuchiPatternSimulation,
+ and the one used by the other set was for the
+ direction cosines of the detector. These angles
+ should all be zero if the pattern and simulation
+ match.
+ """
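+        # For a perfect fit we expect a_ang ~ 0, d_ang ~ r_ang and n_ang ~ 0;
+        # these conditions are asserted in test_fit_detector_orientation.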
+ det, sim_lines, u_os = self.setup_detector_sim_and_u_os(tilt, azimuthal, twist)
+ za_idx, conversions, cds_px_int = self.setup_za_and_cds(
+ zone_axes, sim_lines, det
+ )
+ a_ang, d_vec_n = self.get_a_angles(sim_lines, za_idx, u_os)
+ d_ang, dddd = self.get_d_ang(cds_px_int, conversions, u_os, d_vec_n, det)
+ r_ang, n_ang = self.get_r_ang_and_n_ang(cds_px_int, det, d_vec_n, dddd)
+
+ return a_ang, d_ang, r_ang, n_ang
+
+ @pytest.mark.parametrize(
+ "tilt, azimuthal, twist, zone_axes",
+ [
+ (0.0, 0.0, 0.0, [[1, 0, 1], [0, 0, 1], [1, 1, 2]]),
+ (0.0, 0.0, 1.2, [[1, 0, 1], [0, 0, 1], [1, 1, 2]]),
+ (40.0, 0.0, 0.0, [[1, 0, 1], [1, 0, 0], [1, -2, 1]]),
+ (40.0, 0.0, 1.2, [[1, 0, 1], [1, 0, 0], [1, -2, 1]]),
+ (0.0, 40.0, 0.0, [[1, 0, 1], [1, 1, 0], [1, 2, 1]]),
+ (0.0, 40.0, 1.2, [[1, 0, 1], [1, 1, 0], [1, 2, 1]]),
+ (40.0, 40.0, 0.0, [[1, 0, 1], [1, 0, 0], [3, 1, 0]]),
+ (40.0, 40.0, 1.2, [[1, 0, 1], [1, 0, 0], [3, 1, 0]]),
+ ],
+ )
+ def test_fit_detector_orientation(self, tilt, azimuthal, twist, zone_axes):
+ """
+ Check that the EBSD pattern simulated from a master
+ pattern and the associated
+ GeometricalKikuchiPatternSimulation match perfectly,
+ for various detector orientations.
+
+        Four sets of angles are returned by self.calculate_fit();
+        see the docstring of that method for details.
+
+        Here we assert that the first set of angles is all
+        zero, that the second and third sets are equal, and
+        that the fourth set is all zero. If these conditions
+ are all met, the GeometricalKikuchiPatternSimulation
+ should match the EBSD pattern simulated from a
+ master pattern perfectly for the given detector
+ orientations.
+
+
+ Parameters
+ ----------
+        tilt : float
+            The detector tilt angle in degrees (i.e. detector.tilt).
+            Detector Euler angle PHI (EBSDDetector.euler[1]) == 90 + tilt.
+        azimuthal : float
+            The detector azimuthal angle in degrees (i.e. detector.azimuthal).
+            Detector Euler angle phi1 (EBSDDetector.euler[0]) == azimuthal.
+        twist : float
+            The detector twist angle (EBSDDetector.euler[2]) in degrees.
+        zone_axes : list or np.ndarray
+            List/array containing three lists, each containing a set
+            of uvw indices describing a zone axis on the pattern,
+            e.g. [[0, 0, 1], [1, 1, 0], [1, 2, 3]].
+
+ Returns
+ -------
+ None.
+
+ """
+ angles = self.calculate_fit(tilt, azimuthal, twist, zone_axes)
+
+ assert np.allclose(angles[0], 0.0)
+ assert np.allclose(angles[1], angles[2])
+ assert np.allclose(angles[3], 0.0)
+
+
+def index_row_in_array(myarray, myrow):
+ """Check if the row "myrow" is present in the array "myarray".
+ If it is, return an int containing the row index of the first
+ occurrence. If the row is not present, return None.
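+
+    Illustrative example (doctest-style, assuming numpy is imported as np
+    at module level):
+
+    >>> arr = np.array([[1, 0, 1], [0, 0, 1], [1, 1, 2]])
+    >>> int(index_row_in_array(arr, [0, 0, 1]))
+    1
+    >>> index_row_in_array(arr, [9, 9, 9]) is None
+    True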
+ """
+ loc = np.where((myarray == myrow).all(-1))[0]
+ if len(loc) > 0:
+ return loc[0]
+ return None
diff --git a/tests/test_utils/test_detector_coordinates.py b/tests/test_utils/test_detector_coordinates.py
new file mode 100644
index 00000000..bfd8c314
--- /dev/null
+++ b/tests/test_utils/test_detector_coordinates.py
@@ -0,0 +1,319 @@
+# Copyright 2019-2024 The kikuchipy developers
+#
+# This file is part of kikuchipy.
+#
+# kikuchipy is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# kikuchipy is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with kikuchipy. If not, see <https://www.gnu.org/licenses/>.
+
+import numpy as np
+import pytest
+
+import kikuchipy as kp
+from kikuchipy._utils._detector_coordinates import (
+ _convert_coordinates,
+ convert_coordinates,
+ get_coordinate_conversions,
+)
+
+
+class TestDetectorCoordinates:
+ s = kp.data.nickel_ebsd_small()
+ det_1d = kp.detectors.EBSDDetector(shape=(60, 60), pc=s.detector.pc[0,])
+ det_2d = s.detector.deepcopy()
+ conv_1d = get_coordinate_conversions(det_1d.gnomonic_bounds, det_1d.bounds)
+ conv_2d = get_coordinate_conversions(det_2d.gnomonic_bounds, det_2d.bounds)
+ coords_5d = np.random.randint(0, 60, (3, 3, 17, 300, 2))
+ coords_4d = coords_5d[:, :, 0] # (3, 3, 300, 2)
+ coords_3d = coords_5d[0, 0] # (17, 300, 2)
+ coords_2d = coords_5d[0, 0, 0] # (300, 2)
+
+ def test_get_conversion_factors(self):
+ conv_1d = get_coordinate_conversions(
+ self.det_1d.gnomonic_bounds, self.det_1d.bounds
+ )
+
+ exp_res_1d = {
+ "pix_to_gn": {
+ "m_x": np.array([0.03319923, 0.03326385, 0.03330547]),
+ "c_x": np.array([-0.83957734, -0.84652344, -0.85204404]),
+ "m_y": np.array([-0.03319923, -0.03326385, -0.03330547]),
+ "c_y": np.array([0.42827701, 0.41940433, 0.42255835]),
+ },
+ "gn_to_pix": {
+ "m_x": np.array([30.12118421, 30.06266362, 30.02509794]),
+ "c_x": np.array([25.28906376, 25.4487495, 25.58270568]),
+ "m_y": np.array([-30.12118421, -30.06266362, -30.02509794]),
+ "c_y": np.array([12.90021062, 12.60841133, 12.6873559]),
+ },
+ }
+
+ for i in ["pix_to_gn", "gn_to_pix"]:
+ for j in ["m_x", "c_x", "m_y", "c_y"]:
+ assert np.allclose(conv_1d[i][j], exp_res_1d[i][j])
+
+ @pytest.mark.parametrize(
+ "coords, detector_index, desired_coords",
+ [
+ (
+ np.array([[36.2, 12.7]]),
+ None,
+ np.array(
+ [
+ [[[0.36223463, 0.00664684]], [[0.357628, -0.00304659]]],
+ [[[0.36432453, 0.00973462]], [[0.35219232, 0.00567801]]],
+ ]
+ ),
+ ),
+ (
+ np.array([[36.2, 12.7], [2.5, 43.7], [8.2, 27.7]]),
+ (0, 1),
+ np.array(
+ [
+ [0.35762801, -0.00304659],
+ [-0.76336381, -1.03422601],
+ [-0.57375985, -0.50200438],
+ ]
+ ),
+ ),
+ ],
+ )
+ def test_coordinate_conversions_correct(
+ self, coords, detector_index, desired_coords
+ ):
+        """Converted coordinates have expected values and round-trip correctly."""
+ pc = np.array(
+ [
+ [
+ [0.4214844, 0.21500351, 0.50201974],
+ [0.42414583, 0.21014019, 0.50104439],
+ ],
+ [
+ [0.42088203, 0.2165417, 0.50079336],
+ [0.42725023, 0.21450546, 0.49996293],
+ ],
+ ]
+ )
+ det = kp.detectors.EBSDDetector(shape=(60, 60), pc=pc)
+ conv = get_coordinate_conversions(det.gnomonic_bounds, det.bounds)
+ cds_out = convert_coordinates(coords, "pix_to_gn", conv, detector_index)
+ cds_back = convert_coordinates(cds_out, "gn_to_pix", conv, detector_index)
+ assert np.allclose(cds_out, desired_coords)
+ assert np.allclose(cds_back[..., :], coords[..., :])
+
+ @pytest.mark.parametrize(
+ "coords, detector_index, desired_coords",
+ [
+ (
+ np.array([[36.2, 12.7]]),
+ None,
+ np.array(
+ [
+ [[[0.36223463, 0.00664684]], [[0.357628, -0.00304659]]],
+ [[[0.36432453, 0.00973462]], [[0.35219232, 0.00567801]]],
+ ]
+ ),
+ ),
+ (
+ np.array([[36.2, 12.7], [2.5, 43.7], [8.2, 27.7]]),
+ (0, 1),
+ np.array(
+ [
+ [0.35762801, -0.00304659],
+ [-0.76336381, -1.03422601],
+ [-0.57375985, -0.50200438],
+ ]
+ ),
+ ),
+ ],
+ )
+ def test_coordinate_conversions_correct_worker(
+ self, coords, detector_index, desired_coords
+ ):
+        """The _convert_coordinates worker gives expected values and round-trips."""
+ pc = np.array(
+ [
+ [
+ [0.4214844, 0.21500351, 0.50201974],
+ [0.42414583, 0.21014019, 0.50104439],
+ ],
+ [
+ [0.42088203, 0.2165417, 0.50079336],
+ [0.42725023, 0.21450546, 0.49996293],
+ ],
+ ]
+ )
+ det = kp.detectors.EBSDDetector(shape=(60, 60), pc=pc)
+ conv = get_coordinate_conversions(det.gnomonic_bounds, det.bounds)
+ nav_shape = conv["pix_to_gn"]["m_x"].shape
+ nav_ndim = len(nav_shape)
+
+        if detector_index is None:
+ detector_index = ()
+            if coords.ndim >= nav_ndim + 2 and coords.shape[:nav_ndim] == nav_shape:
+                # one or more sets of coords, different for each image
+                out_shape = coords.shape
+            else:
+                # one or more sets of coords, the same for each image
+                out_shape = nav_shape + coords.shape
+
+            extra_axes = list(range(nav_ndim, len(out_shape) - 1))
+            cds_out = _convert_coordinates(
+                coords,
+                out_shape,
+                detector_index,
+                np.expand_dims(conv["pix_to_gn"]["m_x"], extra_axes),
+                np.expand_dims(conv["pix_to_gn"]["c_x"], extra_axes),
+                np.expand_dims(conv["pix_to_gn"]["m_y"], extra_axes),
+                np.expand_dims(conv["pix_to_gn"]["c_y"], extra_axes),
+            )
+
+            cds_back = _convert_coordinates(
+                cds_out,
+                out_shape,
+                detector_index,
+                np.expand_dims(conv["gn_to_pix"]["m_x"], extra_axes),
+                np.expand_dims(conv["gn_to_pix"]["c_x"], extra_axes),
+                np.expand_dims(conv["gn_to_pix"]["m_y"], extra_axes),
+                np.expand_dims(conv["gn_to_pix"]["c_y"], extra_axes),
+            )
+
+ else:
+ out_shape = coords.shape
+
+ cds_out = _convert_coordinates(
+ coords,
+ out_shape,
+ detector_index,
+ conv["pix_to_gn"]["m_x"],
+ conv["pix_to_gn"]["c_x"],
+ conv["pix_to_gn"]["m_y"],
+ conv["pix_to_gn"]["c_y"],
+ )
+
+ cds_back = _convert_coordinates(
+ cds_out,
+ out_shape,
+ detector_index,
+ conv["gn_to_pix"]["m_x"],
+ conv["gn_to_pix"]["c_x"],
+ conv["gn_to_pix"]["m_y"],
+ conv["gn_to_pix"]["c_y"],
+ )
+
+ assert np.allclose(cds_out, desired_coords)
+ assert np.allclose(cds_back[..., :], coords[..., :])
+
+ def test_coordinate_conversions_indexing(self):
+ """Converting from pixel to gnomonic coords and back."""
+ # conversions
+ cds_out5d_1 = convert_coordinates(
+ self.coords_5d, "pix_to_gn", self.conv_2d, None
+ )
+ cds_out5d_2 = convert_coordinates(
+ self.coords_5d, "pix_to_gn", self.conv_2d, (1, 2)
+ )
+
+ cds_out4d_1 = convert_coordinates(
+ self.coords_4d, "pix_to_gn", self.conv_2d, None
+ )
+ cds_out4d_2 = convert_coordinates(
+ self.coords_4d, "pix_to_gn", self.conv_2d, (1, 2)
+ )
+
+ cds_out3d_1 = convert_coordinates(
+ self.coords_3d, "pix_to_gn", self.conv_2d, None
+ )
+ cds_out3d_2 = convert_coordinates(
+ self.coords_3d, "pix_to_gn", self.conv_2d, (1, 2)
+ )
+
+ cds_out2d_1 = convert_coordinates(
+ self.coords_2d, "pix_to_gn", self.conv_2d, None
+ )
+ cds_out2d_2 = convert_coordinates(
+ self.coords_2d, "pix_to_gn", self.conv_2d, (1, 2)
+ )
+
+ cds_out5d_3 = convert_coordinates(
+ self.coords_5d, "pix_to_gn", self.conv_1d, None
+ )
+ cds_out5d_4 = convert_coordinates(
+            self.coords_5d, "pix_to_gn", self.conv_1d, (1,)
+ )
+ cds_out5d_5 = convert_coordinates(self.coords_5d, "pix_to_gn", self.conv_1d, 1)
+
+ cds_out4d_3 = convert_coordinates(
+ self.coords_4d, "pix_to_gn", self.conv_1d, None
+ )
+ cds_out4d_4 = convert_coordinates(
+ self.coords_4d, "pix_to_gn", self.conv_1d, (1,)
+ )
+ cds_out4d_5 = convert_coordinates(self.coords_4d, "pix_to_gn", self.conv_1d, 1)
+
+ cds_out3d_3 = convert_coordinates(
+ self.coords_3d, "pix_to_gn", self.conv_1d, None
+ )
+ cds_out3d_4 = convert_coordinates(
+ self.coords_3d, "pix_to_gn", self.conv_1d, (1,)
+ )
+ cds_out3d_5 = convert_coordinates(self.coords_3d, "pix_to_gn", self.conv_1d, 1)
+
+ cds_out2d_3 = convert_coordinates(
+ self.coords_2d, "pix_to_gn", self.conv_1d, None
+ )
+ cds_out2d_4 = convert_coordinates(
+ self.coords_2d, "pix_to_gn", self.conv_1d, (1,)
+ )
+ cds_out2d_5 = convert_coordinates(self.coords_2d, "pix_to_gn", self.conv_1d, 1)
+
+ # convert back
+ cds_back_5d_1 = convert_coordinates(
+ cds_out5d_1, "gn_to_pix", self.conv_2d, None
+ )
+ cds_back_4d_1 = convert_coordinates(
+ cds_out4d_1, "gn_to_pix", self.conv_2d, None
+ )
+ cds_back_3d_1 = convert_coordinates(
+ cds_out3d_1, "gn_to_pix", self.conv_2d, None
+ )
+ cds_back_2d_1 = convert_coordinates(
+ cds_out2d_1, "gn_to_pix", self.conv_2d, None
+ )
+
+ # indexing checks
+ assert np.allclose(cds_out5d_2[1, 2], cds_out5d_1[1, 2])
+ assert np.allclose(cds_out4d_2[1, 2], cds_out4d_1[1, 2])
+ assert np.allclose(cds_out3d_2, cds_out3d_1[1, 2])
+ assert np.allclose(cds_out2d_2, cds_out2d_1[1, 2])
+
+ assert np.allclose(cds_out5d_4[1], cds_out5d_3[1])
+ assert np.allclose(cds_out5d_5, cds_out5d_4)
+ assert np.allclose(cds_out4d_4[1], cds_out4d_3[1])
+ assert np.allclose(cds_out4d_5, cds_out4d_4)
+ assert np.allclose(cds_out3d_4, cds_out3d_3[1])
+ assert np.allclose(cds_out3d_5, cds_out3d_4)
+ assert np.allclose(cds_out2d_4, cds_out2d_3[1])
+ assert np.allclose(cds_out2d_5, cds_out2d_4)
+
+ # back-conversion checks
+ assert np.allclose(cds_back_5d_1, self.coords_5d)
+ assert np.allclose(cds_back_4d_1, self.coords_4d)
+ assert np.allclose(cds_back_3d_1, self.coords_3d)
+ assert np.allclose(cds_back_2d_1, self.coords_2d)
+
+ for i in range(3):
+ for j in range(3):
+ q = convert_coordinates(
+ self.coords_5d[i, j], "pix_to_gn", self.conv_2d, (i, j)
+ )
+ assert np.allclose(q, cds_out5d_1[i, j])
diff --git a/tests/test_utils/test_gnomonic_bounds.py b/tests/test_utils/test_gnomonic_bounds.py
new file mode 100644
index 00000000..79801dc6
--- /dev/null
+++ b/tests/test_utils/test_gnomonic_bounds.py
@@ -0,0 +1,36 @@
+# Copyright 2019-2024 The kikuchipy developers
+#
+# This file is part of kikuchipy.
+#
+# kikuchipy is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# kikuchipy is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with kikuchipy. If not, see <https://www.gnu.org/licenses/>.
+
+import numpy as np
+
+import kikuchipy as kp
+from kikuchipy._utils._gnonomic_bounds import get_gnomonic_bounds
+
+
+class TestGnomonicBounds:
+ s = kp.data.nickel_ebsd_small()
+ det_1d = kp.detectors.EBSDDetector(shape=(1024, 1244), pc=s.detector.pc[0, 0])
+ nrows = det_1d.nrows
+ ncols = det_1d.ncols
+ pcx = det_1d.pcx[0]
+ pcy = det_1d.pcy[0]
+ pcz = det_1d.pcz[0]
+
+ def test_gnomonic_bounds(self):
+ gn_b = get_gnomonic_bounds(self.nrows, self.ncols, self.pcx, self.pcy, self.pcz)
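+        # gn_b is expected to reproduce EBSDDetector.gnomonic_bounds, i.e.
+        # the detector screen extent [x_min, x_max, y_min, y_max] in
+        # gnomonic coordinates (the gnomonic analogue of the pixel bounds
+        # [x0, x1, y0, y1]).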
+
+ assert np.allclose(gn_b, np.squeeze(self.det_1d.gnomonic_bounds))
diff --git a/tests/test_utils/test_rotation.py b/tests/test_utils/test_rotation.py
index fb08e29b..00ad7966 100644
--- a/tests/test_utils/test_rotation.py
+++ b/tests/test_utils/test_rotation.py
@@ -35,15 +35,15 @@ def test_rotate_vector(self):
"""
rot = np.array([0.7071, 0.7071, 0, 0])
sig_shape = (20, 30)
+ det = kp.detectors.EBSDDetector(
+ shape=sig_shape, pc=(0.5, 0.5, 0.5), sample_tilt=70, tilt=10, azimuthal=0
+ )
dc = _get_direction_cosines_for_fixed_pc.py_func(
- pcx=0.5,
- pcy=0.5,
- pcz=0.5,
- nrows=sig_shape[0],
- ncols=sig_shape[1],
- tilt=10,
- azimuthal=0,
- sample_tilt=70,
+ gnomonic_bounds=det.gnomonic_bounds.squeeze().astype(np.float64),
+ pcz=det.pc.squeeze().astype(np.float64)[2],
+ nrows=det.nrows,
+ ncols=det.ncols,
+ om_detector_to_sample=(~det.sample_to_detector).to_matrix().squeeze(),
signal_mask=np.ones(sig_shape[0] * sig_shape[1], dtype=bool),
)