vedo.volume

Work with volumetric datasets (voxel data).

   1import glob
   2import os
   3
   4import numpy as np
   5
   6try:
   7    import vedo.vtkclasses as vtk
   8except ImportError:
   9    import vtkmodules.all as vtk
  10
  11import vedo
  12from vedo import utils
  13from vedo.base import Base3DProp
  14from vedo.base import BaseGrid
  15from vedo.mesh import Mesh
  16
  17
  18__docformat__ = "google"
  19
  20__doc__ = """
  21Work with volumetric datasets (voxel data).
  22
  23![](https://vedo.embl.es/images/volumetric/slicePlane2.png)
  24"""
  25
  26__all__ = ["BaseVolume", "Volume", "VolumeSlice"]
  27
  28
  29##########################################################################
  30class BaseVolume:
  31    """
  32    Base class. Do not instantiate.
  33    """
  34
  35    def __init__(self):
  36        """Base class. Do not instantiate."""
  37
  38        self._data = None
  39        self._mapper = None
  40        self.transform = None
  41        self.pipeline = None
  42
  43    def _repr_html_(self):
  44        """
  45        HTML representation of the Volume object for Jupyter Notebooks.
  46
  47        Returns:
  48            HTML text with the image and some properties.
  49        """
  50        import io
  51        import base64
  52        from PIL import Image
  53
  54        library_name = "vedo.volume.Volume"
  55        help_url = "https://vedo.embl.es/docs/vedo/volume.html"
  56
  57        arr = self.thumbnail(azimuth=0, elevation=-60, zoom=1.4, axes=True)
  58
  59        im = Image.fromarray(arr)
  60        buffered = io.BytesIO()
  61        im.save(buffered, format="PNG", quality=100)
  62        encoded = base64.b64encode(buffered.getvalue()).decode("utf-8")
  63        url = "data:image/png;base64," + encoded
  64        image = f"<img src='{url}'></img>"
  65
  66        # statistics
  67        bounds = "<br/>".join(
  68            [
  69                utils.precision(min_x, 4) + " ... " + utils.precision(max_x, 4)
  70                for min_x, max_x in zip(self.bounds()[::2], self.bounds()[1::2])
  71            ]
  72        )
  73
  74        help_text = ""
  75        if self.name:
  76            help_text += f"<b> {self.name}: &nbsp&nbsp</b>"
  77        help_text += '<b><a href="' + help_url + '" target="_blank">' + library_name + "</a></b>"
  78        if self.filename:
  79            dots = ""
  80            if len(self.filename) > 30:
  81                dots = "..."
  82            help_text += f"<br/><code><i>({dots}{self.filename[-30:]})</i></code>"
  83
  84        pdata = ""
  85        if self._data.GetPointData().GetScalars():
  86            if self._data.GetPointData().GetScalars().GetName():
  87                name = self._data.GetPointData().GetScalars().GetName()
  88                pdata = "<tr><td><b> point data array </b></td><td>" + name + "</td></tr>"
  89
  90        cdata = ""
  91        if self._data.GetCellData().GetScalars():
  92            if self._data.GetCellData().GetScalars().GetName():
  93                name = self._data.GetCellData().GetScalars().GetName()
  94                cdata = "<tr><td><b> voxel data array </b></td><td>" + name + "</td></tr>"
  95
  96        img = self.GetMapper().GetInput()
  97
  98        allt = [
  99            "<table>",
 100            "<tr>",
 101            "<td>",
 102            image,
 103            "</td>",
 104            "<td style='text-align: center; vertical-align: center;'><br/>",
 105            help_text,
 106            "<table>",
 107            "<tr><td><b> bounds </b> <br/> (x/y/z) </td><td>" + str(bounds) + "</td></tr>",
 108            "<tr><td><b> dimensions </b></td><td>" + str(img.GetDimensions()) + "</td></tr>",
 109            "<tr><td><b> voxel spacing </b></td><td>"
 110            + utils.precision(img.GetSpacing(), 3)
 111            + "</td></tr>",
 112            "<tr><td><b> in memory size </b></td><td>"
 113            + str(int(img.GetActualMemorySize() / 1024))
 114            + "MB</td></tr>",
 115            pdata,
 116            cdata,
 117            "<tr><td><b> scalar range </b></td><td>"
 118            + utils.precision(img.GetScalarRange(), 4)
 119            + "</td></tr>",
 120            "</table>",
 121            "</table>",
 122        ]
 123        return "\n".join(allt)
 124
 125    def _update(self, img):
 126        self._data = img
 127        self._data.GetPointData().Modified()
 128        self._mapper.SetInputData(img)
 129        self._mapper.Modified()
 130        self._mapper.Update()
 131        return self
 132
 133    def clone(self):
 134        """Return a clone copy of the Volume."""
 135        newimg = vtk.vtkImageData()
 136        newimg.CopyStructure(self._data)
 137        newimg.CopyAttributes(self._data)
 138
 139        newvol = Volume(newimg)
 140        prop = vtk.vtkVolumeProperty()
 141        prop.DeepCopy(self.GetProperty())
 142        newvol.SetProperty(prop)
 143        newvol.SetOrigin(self.GetOrigin())
 144        newvol.SetScale(self.GetScale())
 145        newvol.SetOrientation(self.GetOrientation())
 146        newvol.SetPosition(self.GetPosition())
 147        newvol.pipeline = utils.OperationNode("clone", parents=[self], c="#bbd0ff", shape="diamond")
 148        return newvol
 149
 150    def imagedata(self):
 151        """Return the underlying `vtkImagaData` object."""
 152        return self._data
 153
 154    def tonumpy(self):
 155        """
 156        Get read-write access to voxels of a Volume object as a numpy array.
 157
 158        When setting values in the output array, use the [:] operator so that numpy
 159        writes into the existing array instead of reallocating a new one.
 160
 161        Example:
 162            `arr[:] = arr*2 + 15`
 163
 164        If the array is modified, call
 165        `volume.imagedata().GetPointData().GetScalars().Modified()`
 166        once all modifications are complete.
 167        """
 168        narray_shape = tuple(reversed(self._data.GetDimensions()))
 169
 170        scals = self._data.GetPointData().GetScalars()
 171        comps = scals.GetNumberOfComponents()
 172        if comps == 1:
 173            narray = utils.vtk2numpy(scals).reshape(narray_shape)
 174            narray = np.transpose(narray, axes=[2, 1, 0])
 175        else:
 176            narray = utils.vtk2numpy(scals).reshape(*narray_shape, comps)
 177            narray = np.transpose(narray, axes=[2, 1, 0, 3])
 178
 179        # narray = utils.vtk2numpy(self._data.GetPointData().GetScalars()).reshape(narray_shape)
 180        # narray = np.transpose(narray, axes=[2, 1, 0])
 181
 182        return narray
 183
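A minimal usage sketch of `tonumpy()`, illustrating the in-place `[:]` update and the explicit `Modified()` call described in the docstring; the random test array is an assumption chosen only for demonstration.

```python
import numpy as np
from vedo import Volume

vol = Volume(np.random.rand(20, 20, 20))       # toy scalar volume
arr = vol.tonumpy()                            # read-write access to the voxels
arr[:] = arr * 2 + 15                          # modify in place, do not reassign arr
vol.imagedata().GetPointData().GetScalars().Modified()  # notify VTK of the change
```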
 184    def dimensions(self):
 185        """Return the nr. of voxels in the 3 dimensions."""
 186        return np.array(self._data.GetDimensions())
 187
 188    def scalar_range(self):
 189        """Return the range of the scalar values."""
 190        return np.array(self._data.GetScalarRange())
 191
 192    def spacing(self, s=None):
 193        """Set/get the voxels size in the 3 dimensions."""
 194        if s is not None:
 195            self._data.SetSpacing(s)
 196            return self
 197        return np.array(self._data.GetSpacing())
 198
 199    # def pos(self, p=None): # conflicts with API of base.x()
 200    #     """Same effect as calling `origin()`."""
 201    #     return self.origin(p)
 202
 203    def origin(self, s=None):
 204        """Set/get the origin of the volumetric dataset."""
 205        ### supersedes base.origin()
 206        ### DIFFERENT from base.origin()!
 207        if s is not None:
 208            self._data.SetOrigin(s)
 209            return self
 210        return np.array(self._data.GetOrigin())
 211
 212    def center(self, p=None):
 213        """Set/get the center of the volumetric dataset."""
 214        if p is not None:
 215            self._data.SetCenter(p)
 216            return self
 217        return np.array(self._data.GetCenter())
 218
 219    def permute_axes(self, x, y, z):
 220        """
 221        Reorder the axes of the Volume by specifying
 222        which input axes should become the new X, Y and Z.
 223        """
 224        imp = vtk.vtkImagePermute()
 225        imp.SetFilteredAxes(x, y, z)
 226        imp.SetInputData(self.imagedata())
 227        imp.Update()
 228        self._update(imp.GetOutput())
 229        self.pipeline = utils.OperationNode(
 230            f"permute_axes\n{(x,y,z)}", parents=[self], c="#4cc9f0"
 231        )
 232        return self
 233
 234    def resample(self, new_spacing, interpolation=1):
 235        """
 236        Resamples a `Volume` to be larger or smaller.
 237
 238        This method modifies the spacing of the input.
 239        Linear interpolation is used to resample the data.
 240
 241        Arguments:
 242            new_spacing : (list)
 243                a list of 3 new spacings for the 3 axes
 244            interpolation : (int)
 245                0=nearest_neighbor, 1=linear, 2=cubic
 246        """
 247        rsp = vtk.vtkImageResample()
            rsp.SetInputData(self.imagedata())  # the resampler needs the current image data as input
 248        oldsp = self.spacing()
 249        for i in range(3):
 250            if oldsp[i] != new_spacing[i]:
 251                rsp.SetAxisOutputSpacing(i, new_spacing[i])
 252        rsp.InterpolateOn()
 253        rsp.SetInterpolationMode(interpolation)
 254        rsp.OptimizationOn()
 255        rsp.Update()
 256        self._update(rsp.GetOutput())
 257        self.pipeline = utils.OperationNode(
 258            f"resample\n{tuple(new_spacing)}", parents=[self], c="#4cc9f0"
 259        )
 260        return self
 261
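A short sketch of `resample()`, assuming a toy volume with the default unit spacing; the target spacing values are arbitrary.

```python
import numpy as np
from vedo import Volume

vol = Volume(np.random.rand(30, 30, 30))        # default spacing is (1, 1, 1)
vol.resample([0.5, 0.5, 0.5], interpolation=1)  # halve the voxel size, linear interpolation
print(vol.dimensions(), vol.spacing())          # roughly twice the voxels per axis
```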
 262    def interpolation(self, itype):
 263        """
 264        Set interpolation type.
 265
 266        0=nearest neighbour, 1=linear
 267        """
 268        self.property.SetInterpolationType(itype)
 269        return self
 270
 271    def threshold(self, above=None, below=None, replace=None, replace_value=None):
 272        """
 273        Binary or continuous volume thresholding.
 274        Find the voxels that contain a value above/below the input values
 275        and replace them with a new value (default is 0).
 276        """
 277        th = vtk.vtkImageThreshold()
 278        th.SetInputData(self.imagedata())
 279
 280        # sanity checks
 281        if above is not None and below is not None:
 282            if above == below:
 283                return self
 284            if above > below:
 285                vedo.logger.warning("in volume.threshold(), above > below, skip.")
 286                return self
 287
 288        ## cases
 289        if below is not None and above is not None:
 290            th.ThresholdBetween(above, below)
 291
 292        elif above is not None:
 293            th.ThresholdByUpper(above)
 294
 295        elif below is not None:
 296            th.ThresholdByLower(below)
 297
 298        ##
 299        if replace is not None:
 300            th.SetReplaceIn(True)
 301            th.SetInValue(replace)
 302        else:
 303            th.SetReplaceIn(False)
 304
 305        if replace_value is not None:
 306            th.SetReplaceOut(True)
 307            th.SetOutValue(replace_value)
 308        else:
 309            th.SetReplaceOut(False)
 310
 311        th.Update()
 312        self._update(th.GetOutput())
 313        self.pipeline = utils.OperationNode("threshold", parents=[self], c="#4cc9f0")
 314        return self
 315
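A hedged sketch of binary thresholding with `threshold()`; the cutoff at 80 and the replacement values are arbitrary choices for a synthetic volume.

```python
import numpy as np
from vedo import Volume

vol = Volume(np.random.rand(25, 25, 25) * 100)
# voxels >= 80 become 1, all others become 0
vol.threshold(above=80, replace=1, replace_value=0)
print(vol.scalar_range())   # typically (0.0, 1.0)
```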
 316    def crop(self, left=None, right=None, back=None, front=None, bottom=None, top=None, VOI=()):
 317        """
 318        Crop a `Volume` object.
 319
 320        Arguments:
 321            left : (float)
 322                fraction to crop from the left plane (negative x)
 323            right : (float)
 324                fraction to crop from the right plane (positive x)
 325            back : (float)
 326                fraction to crop from the back plane (negative y)
 327            front : (float)
 328                fraction to crop from the front plane (positive y)
 329            bottom : (float)
 330                fraction to crop from the bottom plane (negative z)
 331            top : (float)
 332                fraction to crop from the top plane (positive z)
 333            VOI : (list)
 334                extract Volume Of Interest expressed in voxel numbers
 335
 336        Example:
 337            `vol.crop(VOI=(xmin, xmax, ymin, ymax, zmin, zmax))  # all integer numbers`
 338        """
 339        extractVOI = vtk.vtkExtractVOI()
 340        extractVOI.SetInputData(self.imagedata())
 341
 342        if VOI:
 343            extractVOI.SetVOI(VOI)
 344        else:
 345            d = self.imagedata().GetDimensions()
 346            bx0, bx1, by0, by1, bz0, bz1 = 0, d[0]-1, 0, d[1]-1, 0, d[2]-1
 347            if left is not None:   bx0 = int((d[0]-1)*left)
 348            if right is not None:  bx1 = int((d[0]-1)*(1-right))
 349            if back is not None:   by0 = int((d[1]-1)*back)
 350            if front is not None:  by1 = int((d[1]-1)*(1-front))
 351            if bottom is not None: bz0 = int((d[2]-1)*bottom)
 352            if top is not None:    bz1 = int((d[2]-1)*(1-top))
 353            extractVOI.SetVOI(bx0, bx1, by0, by1, bz0, bz1)
 354        extractVOI.Update()
 355        self._update(extractVOI.GetOutput())
 356
 357        self.pipeline = utils.OperationNode(
 358            "crop", parents=[self], c="#4cc9f0", comment=f"dims={tuple(self.dimensions())}"
 359        )
 360        return self
 361
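Two ways of calling `crop()`, sketched on synthetic volumes: a fractional crop along x and an explicit voxel VOI (the values are arbitrary).

```python
import numpy as np
from vedo import Volume

vol = Volume(np.random.rand(50, 40, 30))
vol.crop(left=0.1, right=0.1)    # trim 10% off each x face
voi = Volume(np.random.rand(50, 40, 30)).crop(VOI=(10, 29, 0, 39, 5, 24))
print(vol.dimensions(), voi.dimensions())
```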
 362    def append(self, volumes, axis="z", preserve_extents=False):
 363        """
 364        Take the components from multiple inputs and merge them into one output.
 365        Except for the append axis, all inputs must have the same extent.
 366        All inputs must have the same number of scalar components.
 367        The output has the same origin and spacing as the first input.
 368        The origin and spacing of all other inputs are ignored.
 369        All inputs must have the same scalar type.
 370
 371        Arguments:
 372            axis : (int, str)
 373                axis expanded to hold the multiple images
 374            preserve_extents : (bool)
 375                if True, the extent of the inputs is used to place
 376                the image in the output. The whole extent of the output is the union of the input
 377                whole extents. Any portion of the output not covered by the inputs is set to zero.
 378                The origin and spacing is taken from the first input.
 379
 380        Example:
 381            ```python
 382            from vedo import Volume, dataurl
 383            vol = Volume(dataurl+'embryo.tif')
 384            vol.append(vol, axis='x').show().close()
 385            ```
 386            ![](https://vedo.embl.es/images/feats/volume_append.png)
 387        """
 388        ima = vtk.vtkImageAppend()
 389        ima.SetInputData(self.imagedata())
 390        if not utils.is_sequence(volumes):
 391            volumes = [volumes]
 392        for volume in volumes:
 393            if isinstance(volume, vtk.vtkImageData):
 394                ima.AddInputData(volume)
 395            else:
 396                ima.AddInputData(volume.imagedata())
 397        ima.SetPreserveExtents(preserve_extents)
 398        if axis == "x":
 399            axis = 0
 400        elif axis == "y":
 401            axis = 1
 402        elif axis == "z":
 403            axis = 2
 404        ima.SetAppendAxis(axis)
 405        ima.Update()
 406        self._update(ima.GetOutput())
 407
 408        self.pipeline = utils.OperationNode(
 409            "append",
 410            parents=[self, *volumes],
 411            c="#4cc9f0",
 412            comment=f"dims={tuple(self.dimensions())}",
 413        )
 414        return self
 415
 416    def pad(self, voxels=10, value=0):
 417        """
 418        Add the specified number of voxels at the `Volume` borders.
 419        Voxels can be a list formatted as `[nx0, nx1, ny0, ny1, nz0, nz1]`.
 420
 421        Arguments:
 422            voxels : (int, list)
 423                number of voxels to be added (or a list of length 6)
 424            value : (int)
 425                intensity value (gray-scale color) of the padding
 426        
 427        Example:
 428            ```python
 429            from vedo import Volume, dataurl, show
 430            iso = Volume(dataurl+'embryo.tif').isosurface()
 431            vol = iso.binarize(spacing=(100, 100, 100)).pad(10)
 432            vol.dilate([15,15,15])
 433            show(iso, vol.isosurface(), N=2, axes=1)
 434            ```
 435            ![](https://vedo.embl.es/images/volumetric/volume_pad.png)
 436        """
 437        x0, x1, y0, y1, z0, z1 = self._data.GetExtent()
 438        pf = vtk.vtkImageConstantPad()
 439        pf.SetInputData(self._data)
 440        pf.SetConstant(value)
 441        if utils.is_sequence(voxels):
 442            pf.SetOutputWholeExtent(
 443                x0 - voxels[0], x1 + voxels[1], 
 444                y0 - voxels[2], y1 + voxels[3], 
 445                z0 - voxels[4], z1 + voxels[5], 
 446            )
 447        else:
 448            pf.SetOutputWholeExtent(
 449                x0 - voxels, x1 + voxels, 
 450                y0 - voxels, y1 + voxels, 
 451                z0 - voxels, z1 + voxels,
 452            )
 453        pf.Update()
 454        self._update(pf.GetOutput())
 455        self.pipeline = utils.OperationNode(
 456            "pad", comment=f"{voxels} voxels", parents=[self], c="#f28482"
 457        )
 458        return self
 459
 460    def resize(self, *newdims):
 461        """Increase or reduce the number of voxels of a Volume with interpolation."""
 462        old_dims = np.array(self.imagedata().GetDimensions())
 463        old_spac = np.array(self.imagedata().GetSpacing())
 464        rsz = vtk.vtkImageResize()
 465        rsz.SetResizeMethodToOutputDimensions()
 466        rsz.SetInputData(self.imagedata())
 467        rsz.SetOutputDimensions(newdims)
 468        rsz.Update()
 469        self._data = rsz.GetOutput()
 470        new_spac = old_spac * old_dims / newdims  # keep aspect ratio
 471        self._data.SetSpacing(new_spac)
 472        self._update(self._data)
 473        self.pipeline = utils.OperationNode(
 474            "resize", parents=[self], c="#4cc9f0", comment=f"dims={tuple(self.dimensions())}"
 475        )
 476        return self
 477
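A sketch of `resize()` on a toy volume; note how the spacing is adjusted to preserve the physical extent (the target dimensions are arbitrary).

```python
import numpy as np
from vedo import Volume

vol = Volume(np.random.rand(20, 20, 20))
vol.resize(40, 40, 40)                  # upsample with interpolation
print(vol.dimensions(), vol.spacing())  # 40 voxels per axis, spacing halved
```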
 478    def normalize(self):
 479        """Normalize that scalar components for each point."""
 480        norm = vtk.vtkImageNormalize()
 481        norm.SetInputData(self.imagedata())
 482        norm.Update()
 483        self._update(norm.GetOutput())
 484        self.pipeline = utils.OperationNode("normalize", parents=[self], c="#4cc9f0")
 485        return self
 486
 487    def mirror(self, axis="x"):
 488        """
 489        Mirror flip along one of the cartesian axes.
 490        """
 491        img = self.imagedata()
 492
 493        ff = vtk.vtkImageFlip()
 494        ff.SetInputData(img)
 495        if axis.lower() == "x":
 496            ff.SetFilteredAxis(0)
 497        elif axis.lower() == "y":
 498            ff.SetFilteredAxis(1)
 499        elif axis.lower() == "z":
 500            ff.SetFilteredAxis(2)
 501        else:
 502            vedo.logger.error("mirror must be set to either x, y, z or n")
 503            raise RuntimeError()
 504        ff.Update()
 505        self._update(ff.GetOutput())
 506        self.pipeline = utils.OperationNode(f"mirror {axis}", parents=[self], c="#4cc9f0")
 507        return self
 508
 509    def operation(self, operation, volume2=None):
 510        """
 511        Perform operations with `Volume` objects.
 512        Keyword `volume2` can be a constant float.
 513
 514        Possible operations are:
 515        ```
 516        +, -, /, 1/x, sin, cos, exp, log,
 517        abs, **2, sqrt, min, max, atan, atan2, median,
 518        mag, dot, gradient, divergence, laplacian.
 519        ```
 520
 521        Examples:
 522            - [volume_operations.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/volume_operations.py)
 523        """
 524        op = operation.lower()
 525        image1 = self._data
 526
 527        mf = None
 528        if op in ["median"]:
 529            mf = vtk.vtkImageMedian3D()
 530            mf.SetInputData(image1)
 531        elif op in ["mag"]:
 532            mf = vtk.vtkImageMagnitude()
 533            mf.SetInputData(image1)
 534        elif op in ["dot", "dotproduct"]:
 535            mf = vtk.vtkImageDotProduct()
 536            mf.SetInput1Data(image1)
 537            mf.SetInput2Data(volume2.imagedata())
 538        elif op in ["grad", "gradient"]:
 539            mf = vtk.vtkImageGradient()
 540            mf.SetDimensionality(3)
 541            mf.SetInputData(image1)
 542        elif op in ["div", "divergence"]:
 543            mf = vtk.vtkImageDivergence()
 544            mf.SetInputData(image1)
 545        elif op in ["laplacian"]:
 546            mf = vtk.vtkImageLaplacian()
 547            mf.SetDimensionality(3)
 548            mf.SetInputData(image1)
 549
 550        if mf is not None:
 551            mf.Update()
 552            vol = Volume(mf.GetOutput())
 553            vol.pipeline = utils.OperationNode(
 554                f"operation\n{op}", parents=[self], c="#4cc9f0", shape="cylinder"
 555            )
 556            return vol  ###########################
 557
 558        mat = vtk.vtkImageMathematics()
 559        mat.SetInput1Data(image1)
 560
 561        K = None
 562
 563        if utils.is_number(volume2):
 564            K = volume2
 565            mat.SetConstantK(K)
 566            mat.SetConstantC(K)
 567
 568        elif volume2 is not None:  # assume volume2 is a Volume object
 569            mat.SetInput2Data(volume2.imagedata())
 570
 571        # ###########################
 572        if op in ["+", "add", "plus"]:
 573            if K:
 574                mat.SetOperationToAddConstant()
 575            else:
 576                mat.SetOperationToAdd()
 577
 578        elif op in ["-", "subtract", "minus"]:
 579            if K:
 580                mat.SetConstantC(-float(K))
 581                mat.SetOperationToAddConstant()
 582            else:
 583                mat.SetOperationToSubtract()
 584
 585        elif op in ["*", "multiply", "times"]:
 586            if K:
 587                mat.SetOperationToMultiplyByK()
 588            else:
 589                mat.SetOperationToMultiply()
 590
 591        elif op in ["/", "divide"]:
 592            if K:
 593                mat.SetConstantK(1.0 / K)
 594                mat.SetOperationToMultiplyByK()
 595            else:
 596                mat.SetOperationToDivide()
 597
 598        elif op in ["1/x", "invert"]:
 599            mat.SetOperationToInvert()
 600        elif op in ["sin"]:
 601            mat.SetOperationToSin()
 602        elif op in ["cos"]:
 603            mat.SetOperationToCos()
 604        elif op in ["exp"]:
 605            mat.SetOperationToExp()
 606        elif op in ["log"]:
 607            mat.SetOperationToLog()
 608        elif op in ["abs"]:
 609            mat.SetOperationToAbsoluteValue()
 610        elif op in ["**2", "square"]:
 611            mat.SetOperationToSquare()
 612        elif op in ["sqrt", "sqr"]:
 613            mat.SetOperationToSquareRoot()
 614        elif op in ["min"]:
 615            mat.SetOperationToMin()
 616        elif op in ["max"]:
 617            mat.SetOperationToMax()
 618        elif op in ["atan"]:
 619            mat.SetOperationToATAN()
 620        elif op in ["atan2"]:
 621            mat.SetOperationToATAN2()
 622        else:
 623            vedo.logger.error(f"unknown operation {operation}")
 624            raise RuntimeError()
 625        mat.Update()
 626
 627        self._update(mat.GetOutput())
 628
 629        self.pipeline = utils.OperationNode(
 630            f"operation\n{op}", parents=[self, volume2], shape="cylinder", c="#4cc9f0"
 631        )
 632        return self
 633
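A sketch of `operation()` combining two synthetic volumes and a constant; the operands and values are assumptions for illustration only.

```python
import numpy as np
from vedo import Volume

v1 = Volume(np.random.rand(20, 20, 20))
v2 = Volume(np.random.rand(20, 20, 20))
v1.operation("+", v2)               # in place: v1 = v1 + v2
v1.operation("*", 10)               # in place: multiply by a constant
grad = v1.operation("gradient")     # returns a new, multi-component Volume
```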
 634    def frequency_pass_filter(self, low_cutoff=None, high_cutoff=None, order=1):
 635        """
 636        Low-pass and high-pass filtering become trivial in the frequency domain.
 637        A portion of the pixels/voxels are simply masked or attenuated.
 638        This function applies Butterworth low-pass and/or high-pass filters
 639        that attenuate the frequency domain image.
 640
 641        The gradual attenuation of the filter is important.
 642        A simple high-pass filter would simply mask a set of pixels in the frequency domain,
 643        but the abrupt transition would cause a ringing effect in the spatial domain.
 644
 645        Arguments:
 646            low_cutoff : (list)
 647                the cutoff frequencies for x, y and z
 648            high_cutoff : (list)
 649                the cutoff frequencies for x, y and z
 650            order : (int)
 651                order determines sharpness of the cutoff curve
 652        """
 653        # https://lorensen.github.io/VTKExamples/site/Cxx/ImageProcessing/IdealHighPass
 654        fft = vtk.vtkImageFFT()
 655        fft.SetInputData(self._data)
 656        fft.Update()
 657        out = fft.GetOutput()
 658
 659        if high_cutoff:
 660            blp = vtk.vtkImageButterworthLowPass()
 661            blp.SetInputData(out)
 662            blp.SetCutOff(high_cutoff)
 663            blp.SetOrder(order)
 664            blp.Update()
 665            out = blp.GetOutput()
 666
 667        if low_cutoff:
 668            bhp = vtk.vtkImageButterworthHighPass()
 669            bhp.SetInputData(out)
 670            bhp.SetCutOff(low_cutoff)
 671            bhp.SetOrder(order)
 672            bhp.Update()
 673            out = bhp.GetOutput()
 674
 675        rfft = vtk.vtkImageRFFT()
 676        rfft.SetInputData(out)
 677        rfft.Update()
 678
 679        ecomp = vtk.vtkImageExtractComponents()
 680        ecomp.SetInputData(rfft.GetOutput())
 681        ecomp.SetComponents(0)
 682        ecomp.Update()
 683        self._update(ecomp.GetOutput())
 684        self.pipeline = utils.OperationNode("frequency_pass_filter", parents=[self], c="#4cc9f0")
 685        return self
 686
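A sketch of `frequency_pass_filter()` used as a low-pass filter on a noisy synthetic volume; the cutoff and order values are arbitrary.

```python
import numpy as np
from vedo import Volume

vol = Volume(np.random.rand(40, 40, 40))
vol.frequency_pass_filter(high_cutoff=(0.1, 0.1, 0.1), order=3)  # attenuate high frequencies
```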
 687    def smooth_gaussian(self, sigma=(2, 2, 2), radius=None):
 688        """
 689        Perform a convolution of the input Volume with a Gaussian kernel.
 690
 691        Arguments:
 692            sigma : (float, list)
 693                standard deviation(s) in voxel units.
 694                A list can be given to smooth in the three directions differently.
 695            radius : (float, list)
 696                radius factor(s) determine how far out the gaussian
 697                kernel will go before being clamped to zero. A list can be given too.
 698        """
 699        gsf = vtk.vtkImageGaussianSmooth()
 700        gsf.SetDimensionality(3)
 701        gsf.SetInputData(self.imagedata())
 702        if utils.is_sequence(sigma):
 703            gsf.SetStandardDeviations(sigma)
 704        else:
 705            gsf.SetStandardDeviation(sigma)
 706        if radius is not None:
 707            if utils.is_sequence(radius):
 708                gsf.SetRadiusFactors(radius)
 709            else:
 710                gsf.SetRadiusFactor(radius)
 711        gsf.Update()
 712        self._update(gsf.GetOutput())
 713        self.pipeline = utils.OperationNode("smooth_gaussian", parents=[self], c="#4cc9f0")
 714        return self
 715
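A sketch of `smooth_gaussian()` with isotropic and anisotropic standard deviations; the sigma and radius values are arbitrary.

```python
import numpy as np
from vedo import Volume

vol = Volume(np.random.rand(30, 30, 30))
vol.smooth_gaussian(sigma=2)                    # same sigma along x, y and z
vol.smooth_gaussian(sigma=(1, 1, 3), radius=4)  # smooth more strongly along z
```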
 716    def smooth_median(self, neighbours=(2, 2, 2)):
 717        """
 718        Median filter that replaces each voxel with the median value
 719        from a rectangular neighborhood around that voxel.
 720        """
 721        imgm = vtk.vtkImageMedian3D()
 722        imgm.SetInputData(self.imagedata())
 723        if utils.is_sequence(neighbours):
 724            imgm.SetKernelSize(neighbours[0], neighbours[1], neighbours[2])
 725        else:
 726            imgm.SetKernelSize(neighbours, neighbours, neighbours)
 727        imgm.Update()
 728        self._update(imgm.GetOutput())
 729        self.pipeline = utils.OperationNode("smooth_median", parents=[self], c="#4cc9f0")
 730        return self
 731
 732    def erode(self, neighbours=(2, 2, 2)):
 733        """
 734        Replace a voxel with the minimum over an ellipsoidal neighborhood of voxels.
 735        If `neighbours` of an axis is 1, no processing is done on that axis.
 736
 737        Examples:
 738            - [erode_dilate.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/erode_dilate.py)
 739
 740                ![](https://vedo.embl.es/images/volumetric/erode_dilate.png)
 741        """
 742        ver = vtk.vtkImageContinuousErode3D()
 743        ver.SetInputData(self._data)
 744        ver.SetKernelSize(neighbours[0], neighbours[1], neighbours[2])
 745        ver.Update()
 746        self._update(ver.GetOutput())
 747        self.pipeline = utils.OperationNode("erode", parents=[self], c="#4cc9f0")
 748        return self
 749
 750    def dilate(self, neighbours=(2, 2, 2)):
 751        """
 752        Replace a voxel with the maximum over an ellipsoidal neighborhood of voxels.
 753        If `neighbours` of an axis is 1, no processing is done on that axis.
 754
 755        Check also `erode()` and `pad()`.
 756
 757        Examples:
 758            - [erode_dilate.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/erode_dilate.py)
 759        """
 760        ver = vtk.vtkImageContinuousDilate3D()
 761        ver.SetInputData(self._data)
 762        ver.SetKernelSize(neighbours[0], neighbours[1], neighbours[2])
 763        ver.Update()
 764        self._update(ver.GetOutput())
 765        self.pipeline = utils.OperationNode("dilate", parents=[self], c="#4cc9f0")
 766        return self
 767
 768    def magnitude(self):
 769        """Colapses components with magnitude function."""
 770        imgm = vtk.vtkImageMagnitude()
 771        imgm.SetInputData(self.imagedata())
 772        imgm.Update()
 773        self._update(imgm.GetOutput())
 774        self.pipeline = utils.OperationNode("magnitude", parents=[self], c="#4cc9f0")
 775        return self
 776
 777    def topoints(self):
 778        """
 779        Extract all image voxels as points.
 780        This function takes an input `Volume` and creates a `Points` object
 781        that contains the points and the point attributes.
 782
 783        Examples:
 784            - [vol2points.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/vol2points.py)
 785        """
 786        v2p = vtk.vtkImageToPoints()
 787        v2p.SetInputData(self.imagedata())
 788        v2p.Update()
 789        mpts = vedo.Points(v2p.GetOutput())
 790        mpts.pipeline = utils.OperationNode("topoints", parents=[self], c="#4cc9f0:#e9c46a")
 791        return mpts
 792
 793    def euclidean_distance(self, anisotropy=False, max_distance=None):
 794        """
 795        Implementation of the Euclidean DT (Distance Transform) using Saito's algorithm.
 796        The distance map produced contains the square of the Euclidean distance values.
 797        The algorithm has a O(n^(D+1)) complexity over n x n x...x n images in D dimensions.
 798
 799        Check out also: https://en.wikipedia.org/wiki/Distance_transform
 800
 801        Arguments:
 802            anisotropy : (bool)
 803                if True, the voxel spacing is taken into account in the
 804                computation of the distances.
 805            max_distance : (float)
 806                any distance bigger than max_distance will not be
 807                computed but set to this specified value instead.
 808
 809        Examples:
 810            - [euclidian_dist.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/euclidian_dist.py)
 811        """
 812        euv = vtk.vtkImageEuclideanDistance()
 813        euv.SetInputData(self._data)
 814        euv.SetConsiderAnisotropy(anisotropy)
 815        if max_distance is not None:
 816            euv.InitializeOn()
 817            euv.SetMaximumDistance(max_distance)
 818        euv.SetAlgorithmToSaito()
 819        euv.Update()
 820        vol = Volume(euv.GetOutput())
 821        vol.pipeline = utils.OperationNode("euclidean_distance", parents=[self], c="#4cc9f0")
 822        return vol
 823
 824    def correlation_with(self, vol2, dim=2):
 825        """
 826        Find the correlation between two volumetric data sets.
 827        Keyword `dim` determines whether the correlation will be 3D, 2D or 1D.
 828        The default is a 2D Correlation.
 829
 830        The output size will match the size of the first input.
 831        The second input is considered the correlation kernel.
 832        """
 833        imc = vtk.vtkImageCorrelation()
 834        imc.SetInput1Data(self._data)
 835        imc.SetInput2Data(vol2.imagedata())
 836        imc.SetDimensionality(dim)
 837        imc.Update()
 838        vol = Volume(imc.GetOutput())
 839
 840        vol.pipeline = utils.OperationNode("correlation_with", parents=[self, vol2], c="#4cc9f0")
 841        return vol
 842
 843    def scale_voxels(self, scale=1):
 844        """Scale the voxel content by factor `scale`."""
 845        rsl = vtk.vtkImageReslice()
 846        rsl.SetInputData(self.imagedata())
 847        rsl.SetScalarScale(scale)
 848        rsl.Update()
 849        self._update(rsl.GetOutput())
 850        self.pipeline = utils.OperationNode(
 851            f"scale_voxels\nscale={scale}", parents=[self], c="#4cc9f0"
 852        )
 853        return self
 854
 855
 856##########################################################################
 857class Volume(BaseVolume, BaseGrid, vtk.vtkVolume):
 858    """
 859    Class to describe datasets that are defined on "voxels":
 860    the 3D equivalent of 2D pixels.
 861    """
 862
 863    def __init__(
 864        self,
 865        inputobj=None,
 866        c="RdBu_r",
 867        alpha=(0.0, 0.0, 0.2, 0.4, 0.8, 1.0),
 868        alpha_gradient=None,
 869        alpha_unit=1,
 870        mode=0,
 871        spacing=None,
 872        dims=None,
 873        origin=None,
 874        mapper="smart",
 875    ):
 876        """
 877        This class can be initialized with a numpy array, a `vtkImageData` object,
 878        or a list of 2D bmp files.
 879
 880        Arguments:
 881            c : (list, str)
 882                sets colors along the scalar range, or a matplotlib color map name
 883            alpha : (float, list)
 884                sets transparencies along the scalar range
 885            alpha_unit : (float)
 886                low values make composite rendering look brighter and denser
 887            origin : (list)
 888                set volume origin coordinates
 889            spacing : (list)
 890                voxel dimensions in x, y and z.
 891            dims : (list)
 892                specify the dimensions of the volume.
 893            mapper : (str)
 894                either 'gpu', 'opengl_gpu', 'fixed' or 'smart'
 895            mode : (int)
 896                define the volumetric rendering style:
 897                    - 0, composite rendering
 898                    - 1, maximum projection
 899                    - 2, minimum projection
 900                    - 3, average projection
 901                    - 4, additive mode
 902
 903                <br>The default mode is "composite" where the scalar values are sampled through
 904                the volume and composited in a front-to-back scheme through alpha blending.
 905                The final color and opacity is determined using the color and opacity transfer
 906                functions specified in alpha keyword.
 907
 908                Maximum and minimum intensity blend modes use the maximum and minimum
 909                scalar values, respectively, along the sampling ray.
 910                The final color and opacity is determined by passing the resultant value
 911                through the color and opacity transfer functions.
 912
 913                Additive blend mode accumulates scalar values by passing each value
 914                through the opacity transfer function and then adding up the product
 915                of the value and its opacity. In other words, the scalar values are scaled
 916                using the opacity transfer function and summed to derive the final color.
 917                Note that the resulting image is always grayscale i.e. aggregated values
 918                are not passed through the color transfer function.
 919                This is because the final value is a derived value and not a real data value
 920                along the sampling ray.
 921
 922                Average intensity blend mode works similarly to the additive blend mode, where
 923                the scalar values are multiplied by opacity calculated from the opacity
 924                transfer function and then added.
 925                The additional step here is to divide the sum by the number of samples
 926                taken through the volume.
 927                As is the case with the additive intensity projection, the final image will
 928                always be grayscale i.e. the aggregated values are not passed through the
 929                color transfer function.
 930
 931        Example:
 932            ```python
 933            from vedo import Volume
 934            vol = Volume("path/to/mydata/rec*.bmp")
 935            vol.show()
 936            ```
 937
 938        Examples:
 939            - [numpy2volume1.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/numpy2volume1.py)
 940
 941                ![](https://vedo.embl.es/images/volumetric/numpy2volume1.png)
 942
 943            - [read_volume2.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/read_volume2.py)
 944
 945                ![](https://vedo.embl.es/images/volumetric/read_volume2.png)
 946
 947        .. note::
 948            if a `list` of values is used for `alpha`, this is interpreted
 949            as a transfer function along the range of the scalar.
 950        """
 951        vtk.vtkVolume.__init__(self)
 952        BaseGrid.__init__(self)
 953        BaseVolume.__init__(self)
 954        # super().__init__()
 955
 956        ###################
 957        if isinstance(inputobj, str):
 958
 959            if "https://" in inputobj:
 960                inputobj = vedo.file_io.download(inputobj, verbose=False)  # fpath
 961            elif os.path.isfile(inputobj):
 962                pass
 963            else:
 964                inputobj = sorted(glob.glob(inputobj))
 965
 966        ###################
 967        if "gpu" in mapper:
 968            self._mapper = vtk.vtkGPUVolumeRayCastMapper()
 969        elif "opengl_gpu" in mapper:
 970            self._mapper = vtk.vtkOpenGLGPUVolumeRayCastMapper()
 971        elif "smart" in mapper:
 972            self._mapper = vtk.vtkSmartVolumeMapper()
 973        elif "fixed" in mapper:
 974            self._mapper = vtk.vtkFixedPointVolumeRayCastMapper()
 975        elif isinstance(mapper, vtk.vtkMapper):
 976            self._mapper = mapper
 977        else:
 978            print("Error unknown mapper type", [mapper])
 979            raise RuntimeError()
 980        self.SetMapper(self._mapper)
 981
 982        ###################
 983        inputtype = str(type(inputobj))
 984
 985        # print('Volume inputtype', inputtype, c='b')
 986
 987        if inputobj is None:
 988            img = vtk.vtkImageData()
 989
 990        elif utils.is_sequence(inputobj):
 991
 992            if isinstance(inputobj[0], str) and ".bmp" in inputobj[0].lower():
 993                # scan sequence of BMP files
 994                ima = vtk.vtkImageAppend()
 995                ima.SetAppendAxis(2)
 996                pb = utils.ProgressBar(0, len(inputobj))
 997                for i in pb.range():
 998                    f = inputobj[i]
 999                    if "_rec_spr.bmp" in f:
1000                        continue
1001                    picr = vtk.vtkBMPReader()
1002                    picr.SetFileName(f)
1003                    picr.Update()
1004                    mgf = vtk.vtkImageMagnitude()
1005                    mgf.SetInputData(picr.GetOutput())
1006                    mgf.Update()
1007                    ima.AddInputData(mgf.GetOutput())
1008                    pb.print("loading...")
1009                ima.Update()
1010                img = ima.GetOutput()
1011
1012            else:
1013
1014                if len(inputobj.shape) == 1:
1015                    varr = utils.numpy2vtk(inputobj)
1016                else:
1017                    varr = utils.numpy2vtk(inputobj.ravel(order="F"))
1018                varr.SetName("input_scalars")
1019
1020                img = vtk.vtkImageData()
1021                if dims is not None:
1022                    img.SetDimensions(dims[2], dims[1], dims[0])
1023                else:
1024                    if len(inputobj.shape) == 1:
1025                        vedo.logger.error("must set dimensions (dims keyword) in Volume")
1026                        raise RuntimeError()
1027                    img.SetDimensions(inputobj.shape)
1028                img.GetPointData().AddArray(varr)
1029                img.GetPointData().SetActiveScalars(varr.GetName())
1030
1031        elif "ImageData" in inputtype:
1032            img = inputobj
1033
1034        elif isinstance(inputobj, Volume):
1035            img = inputobj.inputdata()
1036
1037        elif "UniformGrid" in inputtype:
1038            img = inputobj
1039
1040        elif hasattr(inputobj, "GetOutput"):  # passing a vtk object, try to extract the image data
1041            if hasattr(inputobj, "Update"):
1042                inputobj.Update()
1043            img = inputobj.GetOutput()
1044
1045        elif isinstance(inputobj, str):
1046            if "https://" in inputobj:
1047                inputobj = vedo.file_io.download(inputobj, verbose=False)
1048            img = vedo.file_io.loadImageData(inputobj)
1049
1050        else:
1051            vedo.logger.error(f"cannot understand input type {inputtype}")
1052            return
1053
1054        if dims is not None:
1055            img.SetDimensions(dims)
1056
1057        if origin is not None:
1058            img.SetOrigin(origin)  ### DIFFERENT from volume.origin()!
1059
1060        if spacing is not None:
1061            img.SetSpacing(spacing)
1062
1063        self._data = img
1064        self._mapper.SetInputData(img)
1065
1066        if img.GetPointData().GetScalars():
1067            if img.GetPointData().GetScalars().GetNumberOfComponents() == 1:
1068                self.mode(mode).color(c).alpha(alpha).alpha_gradient(alpha_gradient)
1069                self.GetProperty().SetShade(True)
1070                self.GetProperty().SetInterpolationType(1)
1071                self.GetProperty().SetScalarOpacityUnitDistance(alpha_unit)
1072
1073        # remember stuff:
1074        self._mode = mode
1075        self._color = c
1076        self._alpha = alpha
1077        self._alpha_grad = alpha_gradient
1078        self._alpha_unit = alpha_unit
1079
1080        self.pipeline = utils.OperationNode(
1081            "Volume", comment=f"dims={tuple(self.dimensions())}", c="#4cc9f0"
1082        )
1083        #######################################################################
1084
1085    def _update(self, data):
1086        self._data = data
1087        self._data.GetPointData().Modified()
1088        self._mapper.SetInputData(data)
1089        self._mapper.Modified()
1090        self._mapper.Update()
1091        return self
1092
1093    def mode(self, mode=None):
1094        """
1095        Define the volumetric rendering mode following this:
1096            - 0, composite rendering
1097            - 1, maximum projection rendering
1098            - 2, minimum projection rendering
1099            - 3, average projection rendering
1100            - 4, additive mode
1101
1102        The default mode is "composite" where the scalar values are sampled through
1103        the volume and composited in a front-to-back scheme through alpha blending.
1104        The final color and opacity is determined using the color and opacity transfer
1105        functions specified in alpha keyword.
1106
1107        Maximum and minimum intensity blend modes use the maximum and minimum
1108        scalar values, respectively, along the sampling ray.
1109        The final color and opacity is determined by passing the resultant value
1110        through the color and opacity transfer functions.
1111
1112        Additive blend mode accumulates scalar values by passing each value
1113        through the opacity transfer function and then adding up the product
1114        of the value and its opacity. In other words, the scalar values are scaled
1115        using the opacity transfer function and summed to derive the final color.
1116        Note that the resulting image is always grayscale i.e. aggregated values
1117        are not passed through the color transfer function.
1118        This is because the final value is a derived value and not a real data value
1119        along the sampling ray.
1120
1121        Average intensity blend mode works similarly to the additive blend mode, where
1122        the scalar values are multiplied by opacity calculated from the opacity
1123        transfer function and then added.
1124        The additional step here is to divide the sum by the number of samples
1125        taken through the volume.
1126        As is the case with the additive intensity projection, the final image will
1127        always be grayscale i.e. the aggregated values are not passed through the
1128        color transfer function.
1129        """
1130        if mode is None:
1131            return self._mapper.GetBlendMode()
1132
1133        if isinstance(mode, str):
1134            if "comp" in mode:
1135                mode = 0
1136            elif "proj" in mode:
1137                if "max" in mode:
1138                    mode = 1
1139                elif "min" in mode:
1140                    mode = 2
1141                elif "ave" in mode:
1142                    mode = 3
1143                else:
1144                    vedo.logger.warning(f"unknown mode {mode}")
1145                    mode = 0
1146            elif "add" in mode:
1147                mode = 4
1148            else:
1149                vedo.logger.warning(f"unknown mode {mode}")
1150                mode = 0
1151
1152        self._mapper.SetBlendMode(mode)
1153        self._mode = mode
1154        return self
1155
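A sketch of switching rendering modes on a synthetic volume; string and integer arguments are equivalent here, and the `show()` call is left commented out.

```python
import numpy as np
from vedo import Volume, show

vol = Volume(np.random.rand(30, 30, 30))
vol.mode("max projection")     # same as vol.mode(1)
# show(vol, axes=1).close()    # uncomment to render interactively
```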
1156    def shade(self, status=None):
1157        """
1158        Set/Get the shading of a Volume.
1159        Shading can be further controlled with `volume.lighting()` method.
1160
1161        If shading is turned on, the mapper may perform shading calculations.
1162        In some cases shading does not apply
1163        (for example, in maximum intensity projection mode).
1164        """
1165        if status is None:
1166            return self.GetProperty().GetShade()
1167        self.GetProperty().SetShade(status)
1168        return self
1169
1170    def cmap(self, c, alpha=None, vmin=None, vmax=None):
1171        """Same as `color()`.
1172
1173        Arguments:
1174            alpha : (list)
1175                use a list to specify transparencies along the scalar range
1176            vmin : (float)
1177                force the min of the scalar range to be this value
1178            vmax : (float)
1179                force the max of the scalar range to be this value
1180        """
1181        return self.color(c, alpha, vmin, vmax)
1182
1183    def jittering(self, status=None):
1184        """
1185        If `True`, each ray traversal direction will be perturbed slightly
1186        using a noise-texture to get rid of wood-grain effects.
1187        """
1188        if hasattr(self._mapper, "SetUseJittering"):  # tetmesh doesn't have it
1189            if status is None:
1190                return self._mapper.GetUseJittering()
1191            self._mapper.SetUseJittering(status)
1192        return self
1193
1194    def mask(self, data):
1195        """
1196        Mask a volume visualization with a binary array.
1197        The Volume must be created with the keyword mapper='gpu'.
1198
1199        Example:
1200        ```python
1201            from vedo import np, Volume, show
1202            data_matrix = np.zeros([75, 75, 75], dtype=np.uint8)
1203            # all voxels have value zero except:
1204            data_matrix[0:35,   0:35,  0:35] = 1
1205            data_matrix[35:55, 35:55, 35:55] = 2
1206            data_matrix[55:74, 55:74, 55:74] = 3
1207            vol = Volume(data_matrix, c=['white','b','g','r'], mapper='gpu')
1208            data_mask = np.zeros_like(data_matrix)
1209            data_mask[10:65, 10:45, 20:75] = 1
1210            vol.mask(data_mask)
1211            show(vol, axes=1).close()
1212        ```
1213        See also:
1214            `volume.hide_voxels()`
1215        """
1216        mask = Volume(data.astype(np.uint8))
1217        try:
1218            self.mapper().SetMaskTypeToBinary()
1219            self.mapper().SetMaskInput(mask.inputdata())
1220        except AttributeError:
1221            vedo.logger.error("volume.mask() must create the volume with Volume(..., mapper='gpu')")
1222        return self
1223
1224    def hide_voxels(self, ids):
1225        """
1226        Hide voxels (cells) from visualization.
1227
1228        Example:
1229            ```python
1230            from vedo import *
1231            embryo = Volume(dataurl+'embryo.tif')
1232            embryo.hide_voxels(list(range(10000)))
1233            show(embryo, axes=1).close()
1234            ```
1235
1236        See also:
1237            `volume.mask()`
1238        """
1239        ghost_mask = np.zeros(self.ncells, dtype=np.uint8)
1240        ghost_mask[ids] = vtk.vtkDataSetAttributes.HIDDENCELL
1241        name = vtk.vtkDataSetAttributes.GhostArrayName()
1242        garr = utils.numpy2vtk(ghost_mask, name=name, dtype=np.uint8)
1243        self._data.GetCellData().AddArray(garr)
1244        self._data.GetCellData().Modified()
1245        return self
1246
1247    def alpha_gradient(self, alpha_grad, vmin=None, vmax=None):
1248        """
1249        Assign a set of transparencies to a volume's gradient
1250        along the range of the scalar value.
1251        A single constant value can also be assigned.
1252        The gradient function is used to decrease the opacity
1253        in the "flat" regions of the volume while maintaining the opacity
1254        at the boundaries between material types.  The gradient is measured
1255        as the amount by which the intensity changes over unit distance.
1256
1257        The format for alpha_grad is the same as for method `volume.alpha()`.
1258        """
1259        if vmin is None:
1260            vmin, _ = self._data.GetScalarRange()
1261        if vmax is None:
1262            _, vmax = self._data.GetScalarRange()
1263        self._alpha_grad = alpha_grad
1264        volumeProperty = self.GetProperty()
1265
1266        if alpha_grad is None:
1267            volumeProperty.DisableGradientOpacityOn()
1268            return self
1269
1270        volumeProperty.DisableGradientOpacityOff()
1271
1272        gotf = volumeProperty.GetGradientOpacity()
1273        if utils.is_sequence(alpha_grad):
1274            alpha_grad = np.array(alpha_grad)
1275            if len(alpha_grad.shape) == 1:  # user passing a flat list e.g. (0.0, 0.3, 0.9, 1)
1276                for i, al in enumerate(alpha_grad):
1277                    xalpha = vmin + (vmax - vmin) * i / (len(alpha_grad) - 1)
1278                    # Create transfer mapping scalar value to gradient opacity
1279                    gotf.AddPoint(xalpha, al)
1280            elif len(alpha_grad.shape) == 2:  # user passing [(x0,alpha0), ...]
1281                gotf.AddPoint(vmin, alpha_grad[0][1])
1282                for xalpha, al in alpha_grad:
1283                    # Create transfer mapping scalar value to opacity
1284                    gotf.AddPoint(xalpha, al)
1285                gotf.AddPoint(vmax, alpha_grad[-1][1])
1286            # print("alpha_grad at", round(xalpha, 1), "\tset to", al)
1287        else:
1288            gotf.AddPoint(vmin, alpha_grad)  # constant alpha_grad
1289            gotf.AddPoint(vmax, alpha_grad)
1290        return self
1291
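A sketch pairing `alpha()` with `alpha_gradient()` on a synthetic volume; the transfer-function values are arbitrary.

```python
import numpy as np
from vedo import Volume

vol = Volume(np.random.rand(30, 30, 30))
vol.alpha([0, 0.3, 0.6, 1.0])        # opacity along the scalar range
vol.alpha_gradient([0.0, 0.5, 1.0])  # fade flat regions, keep boundaries opaque
```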
1292    def component_weight(self, i, weight):
1293        """Set the scalar component weight in range [0,1]."""
1294        self.GetProperty().SetComponentWeight(i, weight)
1295        return self
1296
1297    def xslice(self, i):
1298        """Extract the slice at index `i` of volume along x-axis."""
1299        vslice = vtk.vtkImageDataGeometryFilter()
1300        vslice.SetInputData(self.imagedata())
1301        nx, ny, nz = self.imagedata().GetDimensions()
1302        if i > nx - 1:
1303            i = nx - 1
1304        vslice.SetExtent(i, i, 0, ny, 0, nz)
1305        vslice.Update()
1306        m = Mesh(vslice.GetOutput())
1307        m.pipeline = utils.OperationNode(f"xslice {i}", parents=[self], c="#4cc9f0:#e9c46a")
1308        return m
1309
1310    def yslice(self, j):
1311        """Extract the slice at index `j` of volume along y-axis."""
1312        vslice = vtk.vtkImageDataGeometryFilter()
1313        vslice.SetInputData(self.imagedata())
1314        nx, ny, nz = self.imagedata().GetDimensions()
1315        if j > ny - 1:
1316            j = ny - 1
1317        vslice.SetExtent(0, nx, j, j, 0, nz)
1318        vslice.Update()
1319        m = Mesh(vslice.GetOutput())
1320        m.pipeline = utils.OperationNode(f"yslice {j}", parents=[self], c="#4cc9f0:#e9c46a")
1321        return m
1322
1323    def zslice(self, k):
1324        """Extract the slice at index `i` of volume along z-axis."""
1325        vslice = vtk.vtkImageDataGeometryFilter()
1326        vslice.SetInputData(self.imagedata())
1327        nx, ny, nz = self.imagedata().GetDimensions()
1328        if k > nz - 1:
1329            k = nz - 1
1330        vslice.SetExtent(0, nx, 0, ny, k, k)
1331        vslice.Update()
1332        m = Mesh(vslice.GetOutput())
1333        m.pipeline = utils.OperationNode(f"zslice {k}", parents=[self], c="#4cc9f0:#e9c46a")
1334        return m
1335
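A sketch extracting orthogonal slices as `Mesh` objects; the slice indices and colormap names are arbitrary.

```python
import numpy as np
from vedo import Volume, show

vol = Volume(np.random.rand(40, 40, 40))
mx = vol.xslice(20).cmap("Blues")   # Mesh of the x-slice at index 20
mz = vol.zslice(5).cmap("Reds")
# show(mx, mz, axes=1).close()
```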
1336    def slice_plane(self, origin=(0, 0, 0), normal=(1, 1, 1), autocrop=False):
1337        """
1338        Extract the slice along a given plane position and normal.
1339
1340        Example:
1341            - [slice_plane1.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/slice_plane1.py)
1342
1343                ![](https://vedo.embl.es/images/volumetric/slicePlane1.gif)
1344        """
1345        reslice = vtk.vtkImageReslice()
1346        reslice.SetInputData(self._data)
1347        reslice.SetOutputDimensionality(2)
1348        newaxis = utils.versor(normal)
1349        pos = np.array(origin)
1350        initaxis = (0, 0, 1)
1351        crossvec = np.cross(initaxis, newaxis)
1352        angle = np.arccos(np.dot(initaxis, newaxis))
1353        T = vtk.vtkTransform()
1354        T.PostMultiply()
1355        T.RotateWXYZ(np.rad2deg(angle), crossvec)
1356        T.Translate(pos)
1357        M = T.GetMatrix()
1358        reslice.SetResliceAxes(M)
1359        reslice.SetInterpolationModeToLinear()
1360        reslice.SetAutoCropOutput(not autocrop)
1361        reslice.Update()
1362        vslice = vtk.vtkImageDataGeometryFilter()
1363        vslice.SetInputData(reslice.GetOutput())
1364        vslice.Update()
1365        msh = Mesh(vslice.GetOutput())
1366        msh.SetOrientation(T.GetOrientation())
1367        msh.SetPosition(pos)
1368        msh.pipeline = utils.OperationNode("slice_plane", parents=[self], c="#4cc9f0:#e9c46a")
1369        return msh
1370
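A sketch of `slice_plane()` cutting a synthetic volume through its center along an oblique normal.

```python
import numpy as np
from vedo import Volume, show

vol = Volume(np.random.rand(40, 40, 40))
msh = vol.slice_plane(origin=vol.center(), normal=(1, 1, 1))
# show(vol, msh, axes=1).close()
```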
1371    def warp(self, source, target, sigma=1, mode="3d", fit=False):
1372        """
1373        Warp volume scalars within a Volume by specifying
1374        source and target sets of points.
1375
1376        Arguments:
1377            source : (Points, list)
1378                the list of source points
1379            target : (Points, list)
1380                the list of target points
1381            fit : (bool)
1382                fit/adapt the old bounding box to the warped geometry
1383        """
1384        if isinstance(source, vedo.Points):
1385            source = source.points()
1386        if isinstance(target, vedo.Points):
1387            target = target.points()
1388
1389        ns = len(source)
1390        ptsou = vtk.vtkPoints()
1391        ptsou.SetNumberOfPoints(ns)
1392        for i in range(ns):
1393            ptsou.SetPoint(i, source[i])
1394
1395        nt = len(target)
1396        if ns != nt:
1397            vedo.logger.error(f"#source points ({ns}) != #target points ({nt})")
1398            raise RuntimeError()
1399
1400        pttar = vtk.vtkPoints()
1401        pttar.SetNumberOfPoints(nt)
1402        for i in range(ns):
1403            pttar.SetPoint(i, target[i])
1404
1405        T = vtk.vtkThinPlateSplineTransform()
1406        if mode.lower() == "3d":
1407            T.SetBasisToR()
1408        elif mode.lower() == "2d":
1409            T.SetBasisToR2LogR()
1410        else:
1411            vedo.logger.error(f"unknown mode {mode}")
1412            raise RuntimeError()
1413
1414        T.SetSigma(sigma)
1415        T.SetSourceLandmarks(ptsou)
1416        T.SetTargetLandmarks(pttar)
1417        T.Inverse()
1418        self.transform = T
1419        self.apply_transform(T, fit=fit)
1420        self.pipeline = utils.OperationNode("warp", parents=[self], c="#4cc9f0")
1421        return self
1422
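A minimal sketch of landmark-based warping (editorial example; the landmark coordinates are arbitrary, and `sigma` and `mode` are simply forwarded to the thin plate spline transform built above):

```python
from vedo import Volume, dataurl

vol = Volume(dataurl + "embryo.tif")
sources = [[100, 100, 100], [150, 120, 100], [200, 200, 150]]  # arbitrary landmarks
targets = [[105, 100, 100], [150, 130, 100], [195, 205, 150]]
vol.warp(sources, targets, sigma=1, mode="3d")
```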
1423    def apply_transform(self, T, fit=False):
1424        """
1425        Apply a transform to the scalars in the volume.
1426
1427        Arguments:
1428            T : (vtkTransform, matrix)
1429                The transformation to be applied
1430            fit : (bool)
1431                fit/adapt the old bounding box to the warped geometry
1432        """
1433        if isinstance(T, vtk.vtkMatrix4x4):
1434            tr = vtk.vtkTransform()
1435            tr.SetMatrix(T)
1436            T = tr
1437
1438        elif utils.is_sequence(T):
1439            M = vtk.vtkMatrix4x4()
1440            n = len(T[0])
1441            for i in range(n):
1442                for j in range(n):
1443                    M.SetElement(i, j, T[i][j])
1444            tr = vtk.vtkTransform()
1445            tr.SetMatrix(M)
1446            T = tr
1447
1448        reslice = vtk.vtkImageReslice()
1449        reslice.SetInputData(self._data)
1450        reslice.SetResliceTransform(T)
1451        reslice.SetOutputDimensionality(3)
1452        reslice.SetInterpolationModeToLinear()
1453
1454        spacing = self._data.GetSpacing()
1455        origin = self._data.GetOrigin()
1456
1457        if fit:
1458            bb = self.box()
1459            if isinstance(T, vtk.vtkThinPlateSplineTransform):
1460                TI = vtk.vtkThinPlateSplineTransform()
1461                TI.DeepCopy(T)
1462                TI.Inverse()
1463            else:
1464                TI = vtk.vtkTransform()
1465                TI.DeepCopy(T)
1466            bb.apply_transform(TI)
1467            bounds = bb.GetBounds()
1468            bounds = (
1469                bounds[0] / spacing[0],
1470                bounds[1] / spacing[0],
1471                bounds[2] / spacing[1],
1472                bounds[3] / spacing[1],
1473                bounds[4] / spacing[2],
1474                bounds[5] / spacing[2],
1475            )
1476            bounds = np.round(bounds).astype(int)
1477            reslice.SetOutputExtent(bounds)
1478            reslice.SetOutputSpacing(spacing[0], spacing[1], spacing[2])
1479            reslice.SetOutputOrigin(origin[0], origin[1], origin[2])
1480
1481        reslice.Update()
1482        self._update(reslice.GetOutput())
1483        self.pipeline = utils.OperationNode("apply_transform", parents=[self], c="#4cc9f0")
1484        return self
1485
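A minimal sketch using the plain 4x4 matrix form accepted by `apply_transform()` above (editorial example; the matrix values are illustrative):

```python
from vedo import Volume, dataurl

vol = Volume(dataurl + "embryo.tif")
M = [[1, 0, 0, 10],   # a simple translation along x, expressed as a 4x4 matrix
     [0, 1, 0,  0],
     [0, 0, 1,  0],
     [0, 0, 0,  1]]
vol.apply_transform(M)
```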
1486
1487##########################################################################
1488class VolumeSlice(BaseVolume, Base3DProp, vtk.vtkImageSlice):
1489    """
1490    Derived class of `vtkImageSlice`.
1491    """
1492
1493    def __init__(self, inputobj=None):
1494        """
1495        This class is equivalent to `Volume` except for its representation.
1496        The main purpose of this class is to be used in conjunction with `Volume`
1497        for visualization using `mode="image"`.
1498        """
1499        vtk.vtkImageSlice.__init__(self)
1500        Base3DProp.__init__(self)
1501        BaseVolume.__init__(self)
1502        # super().__init__()
1503
1504        self._mapper = vtk.vtkImageResliceMapper()
1505        self._mapper.SliceFacesCameraOn()
1506        self._mapper.SliceAtFocalPointOn()
1507        self._mapper.SetAutoAdjustImageQuality(False)
1508        self._mapper.BorderOff()
1509
1510        self.lut = None
1511
1512        self.property = vtk.vtkImageProperty()
1513        self.property.SetInterpolationTypeToLinear()
1514        self.SetProperty(self.property)
1515
1516        ###################
1517        if isinstance(inputobj, str):
1518            if "https://" in inputobj:
1519                inputobj = vedo.file_io.download(inputobj, verbose=False)  # fpath
1520            elif os.path.isfile(inputobj):
1521                pass
1522            else:
1523                inputobj = sorted(glob.glob(inputobj))
1524
1525        ###################
1526        inputtype = str(type(inputobj))
1527
1528        if inputobj is None:
1529            img = vtk.vtkImageData()
1530
1531        elif isinstance(inputobj, Volume):  # elif: a None input keeps the empty image created above
1532            img = inputobj.imagedata()
1533            self.lut = utils.ctf2lut(inputobj)
1534
1535        elif utils.is_sequence(inputobj):
1536
1537            if isinstance(inputobj[0], str):  # scan sequence of BMP files
1538                ima = vtk.vtkImageAppend()
1539                ima.SetAppendAxis(2)
1540                pb = utils.ProgressBar(0, len(inputobj))
1541                for i in pb.range():
1542                    f = inputobj[i]
1543                    picr = vtk.vtkBMPReader()
1544                    picr.SetFileName(f)
1545                    picr.Update()
1546                    mgf = vtk.vtkImageMagnitude()
1547                    mgf.SetInputData(picr.GetOutput())
1548                    mgf.Update()
1549                    ima.AddInputData(mgf.GetOutput())
1550                    pb.print("loading...")
1551                ima.Update()
1552                img = ima.GetOutput()
1553
1554            else:
1555                if "ndarray" not in inputtype:
1556                    inputobj = np.array(inputobj)
1557
1558                if len(inputobj.shape) == 1:
1559                    varr = utils.numpy2vtk(inputobj, dtype=float)
1560                else:
1561                    if len(inputobj.shape) > 2:
1562                        inputobj = np.transpose(inputobj, axes=[2, 1, 0])
1563                    varr = utils.numpy2vtk(inputobj.ravel(order="F"), dtype=float)
1564                varr.SetName("input_scalars")
1565
1566                img = vtk.vtkImageData()
1567                img.SetDimensions(inputobj.shape)
1568                img.GetPointData().AddArray(varr)
1569                img.GetPointData().SetActiveScalars(varr.GetName())
1570
1571        elif "ImageData" in inputtype:
1572            img = inputobj
1573
1574        elif isinstance(inputobj, Volume):
1575            img = inputobj.inputdata()
1576
1577        elif "UniformGrid" in inputtype:
1578            img = inputobj
1579
1580        elif hasattr(inputobj, "GetOutput"):  # passing a vtk object, try to extract its image data
1581            if hasattr(inputobj, "Update"):
1582                inputobj.Update()
1583            img = inputobj.GetOutput()
1584
1585        elif isinstance(inputobj, str):
1586            if "https://" in inputobj:
1587                inputobj = vedo.file_io.download(inputobj, verbose=False)
1588            img = vedo.file_io.loadImageData(inputobj)
1589
1590        else:
1591            vedo.logger.error(f"cannot understand input type {inputtype}")
1592            return
1593
1594        self._data = img
1595        self._mapper.SetInputData(img)
1596        self.SetMapper(self._mapper)
1597
1598    def bounds(self):
1599        """Return the bounding box as [x0,x1, y0,y1, z0,z1]"""
1600        bns = [0, 0, 0, 0, 0, 0]
1601        self.GetBounds(bns)
1602        return bns
1603
1604    def colorize(self, lut=None, fix_scalar_range=False):
1605        """
1606        Assign a LUT (Look Up Table) to colorize the slice, leave it `None`
1607        to reuse an existing Volume color map.
1608        Use "bw" for automatic black and white.
1609        """
1610        if lut is None and self.lut:
1611            self.property.SetLookupTable(self.lut)
1612        elif isinstance(lut, vtk.vtkLookupTable):
1613            self.property.SetLookupTable(lut)
1614        elif lut == "bw":
1615            self.property.SetLookupTable(None)
1616        self.property.SetUseLookupTableScalarRange(fix_scalar_range)
1617        return self
1618
1619    def alpha(self, value):
1620        """Set the opacity of the slice."""
1621        self.property.SetOpacity(value)
1622        return self
1623
1624    def auto_adjust_quality(self, value=True):
1625        """Automatically reduce the rendering quality for greater speed when interacting"""
1626        self._mapper.SetAutoAdjustImageQuality(value)
1627        return self
1628
1629    def slab(self, thickness=0, mode=0, sample_factor=2):
1630        """
1631        Make a thick slice (slab).
1632
1633        Arguments:
1634            thickness : (float)
1635                set the slab thickness, for thick slicing
1636            mode : (int)
1637                The slab type:
1638                    0 = min
1639                    1 = max
1640                    2 = mean
1641                    3 = sum
1642            sample_factor : (float)
1643                Set the number of slab samples to use as a factor of the number of input slices
1644                within the slab thickness. The default value is 2, but 1 will increase speed
1645                with very little loss of quality.
1646        """
1647        self._mapper.SetSlabThickness(thickness)
1648        self._mapper.SetSlabType(mode)
1649        self._mapper.SetSlabSampleFactor(sample_factor)
1650        return self
1651
1652    def face_camera(self, value=True):
1653        """Make the slice always face the camera or not."""
1654        self._mapper.SetSliceFacesCamera(value)  # boolean setter on the reslice mapper
1655        return self
1656
1657    def jump_to_nearest_slice(self, value=True):
1658        """
1659        This causes the slicing to occur at the closest slice to the focal point,
1660        instead of the default behavior where a new slice is interpolated between
1661        the original slices.
1662        Nothing happens if the plane is oblique to the original slices."""
1663        self._mapper.SetJumpToNearestSlice(value)  # this flag lives on the reslice mapper
1664        return self
1665
1666    def fill_background(self, value=True):
1667        """
1668        Instead of rendering only to the image border,
1669        render out to the viewport boundary with the background color.
1670        The background color will be the lowest color on the lookup
1671        table that is being used for the image."""
1672        self._mapper.SetBackground(value)
1673        return self
1674
1675    def lighting(self, window, level, ambient=1.0, diffuse=0.0):
1676        """Assign the color window and level, plus the ambient and diffuse lighting coefficients."""
1677        self.property.SetColorWindow(window)
1678        self.property.SetColorLevel(level)
1679        self.property.SetAmbient(ambient)
1680        self.property.SetDiffuse(diffuse)
1681        return self
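A minimal usage sketch for `VolumeSlice` (editorial example; the window/level values are illustrative):

```python
from vedo import Volume, dataurl, show
from vedo.volume import VolumeSlice

vol = Volume(dataurl + "embryo.tif")
vslice = VolumeSlice(vol)               # reuses the Volume's color map if available
vslice.lighting(window=200, level=100)  # set the color window and level
show(vslice, axes=1)
```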
class BaseVolume:
 31class BaseVolume:
 32    """
 33    Base class. Do not instantiate.
 34    """
 35
 36    def __init__(self):
 37        """Base class. Do not instantiate."""
 38
 39        self._data = None
 40        self._mapper = None
 41        self.transform = None
 42        self.pipeline = None
 43
 44    def _repr_html_(self):
 45        """
 46        HTML representation of the Volume object for Jupyter Notebooks.
 47
 48        Returns:
 49            HTML text with the image and some properties.
 50        """
 51        import io
 52        import base64
 53        from PIL import Image
 54
 55        library_name = "vedo.volume.Volume"
 56        help_url = "https://vedo.embl.es/docs/vedo/volume.html"
 57
 58        arr = self.thumbnail(azimuth=0, elevation=-60, zoom=1.4, axes=True)
 59
 60        im = Image.fromarray(arr)
 61        buffered = io.BytesIO()
 62        im.save(buffered, format="PNG", quality=100)
 63        encoded = base64.b64encode(buffered.getvalue()).decode("utf-8")
 64        url = "data:image/png;base64," + encoded
 65        image = f"<img src='{url}'></img>"
 66
 67        # statisitics
 68        bounds = "<br/>".join(
 69            [
 70                utils.precision(min_x, 4) + " ... " + utils.precision(max_x, 4)
 71                for min_x, max_x in zip(self.bounds()[::2], self.bounds()[1::2])
 72            ]
 73        )
 74
 75        help_text = ""
 76        if self.name:
 77            help_text += f"<b> {self.name}: &nbsp&nbsp</b>"
 78        help_text += '<b><a href="' + help_url + '" target="_blank">' + library_name + "</a></b>"
 79        if self.filename:
 80            dots = ""
 81            if len(self.filename) > 30:
 82                dots = "..."
 83            help_text += f"<br/><code><i>({dots}{self.filename[-30:]})</i></code>"
 84
 85        pdata = ""
 86        if self._data.GetPointData().GetScalars():
 87            if self._data.GetPointData().GetScalars().GetName():
 88                name = self._data.GetPointData().GetScalars().GetName()
 89                pdata = "<tr><td><b> point data array </b></td><td>" + name + "</td></tr>"
 90
 91        cdata = ""
 92        if self._data.GetCellData().GetScalars():
 93            if self._data.GetCellData().GetScalars().GetName():
 94                name = self._data.GetCellData().GetScalars().GetName()
 95                cdata = "<tr><td><b> voxel data array </b></td><td>" + name + "</td></tr>"
 96
 97        img = self.GetMapper().GetInput()
 98
 99        allt = [
100            "<table>",
101            "<tr>",
102            "<td>",
103            image,
104            "</td>",
105            "<td style='text-align: center; vertical-align: center;'><br/>",
106            help_text,
107            "<table>",
108            "<tr><td><b> bounds </b> <br/> (x/y/z) </td><td>" + str(bounds) + "</td></tr>",
109            "<tr><td><b> dimensions </b></td><td>" + str(img.GetDimensions()) + "</td></tr>",
110            "<tr><td><b> voxel spacing </b></td><td>"
111            + utils.precision(img.GetSpacing(), 3)
112            + "</td></tr>",
113            "<tr><td><b> in memory size </b></td><td>"
114            + str(int(img.GetActualMemorySize() / 1024))
115            + "MB</td></tr>",
116            pdata,
117            cdata,
118            "<tr><td><b> scalar range </b></td><td>"
119            + utils.precision(img.GetScalarRange(), 4)
120            + "</td></tr>",
121            "</table>",
122            "</table>",
123        ]
124        return "\n".join(allt)
125
126    def _update(self, img):
127        self._data = img
128        self._data.GetPointData().Modified()
129        self._mapper.SetInputData(img)
130        self._mapper.Modified()
131        self._mapper.Update()
132        return self
133
134    def clone(self):
135        """Return a clone copy of the Volume."""
136        newimg = vtk.vtkImageData()
137        newimg.CopyStructure(self._data)
138        newimg.CopyAttributes(self._data)
139
140        newvol = Volume(newimg)
141        prop = vtk.vtkVolumeProperty()
142        prop.DeepCopy(self.GetProperty())
143        newvol.SetProperty(prop)
144        newvol.SetOrigin(self.GetOrigin())
145        newvol.SetScale(self.GetScale())
146        newvol.SetOrientation(self.GetOrientation())
147        newvol.SetPosition(self.GetPosition())
148        newvol.pipeline = utils.OperationNode("clone", parents=[self], c="#bbd0ff", shape="diamond")
149        return newvol
150
151    def imagedata(self):
152        """Return the underlying `vtkImageData` object."""
153        return self._data
154
155    def tonumpy(self):
156        """
157        Get read-write access to voxels of a Volume object as a numpy array.
158
159        When you set values in the output image, you don't want numpy to reallocate the array
160        but instead set values in the existing array, so use the [:] operator.
161
162        Example:
163            `arr[:] = arr*2 + 15`
164
165        If the array is modified add a call to:
166        `volume.imagedata().GetPointData().GetScalars().Modified()`
167        when all your modifications are completed.
168        """
169        narray_shape = tuple(reversed(self._data.GetDimensions()))
170
171        scals = self._data.GetPointData().GetScalars()
172        comps = scals.GetNumberOfComponents()
173        if comps == 1:
174            narray = utils.vtk2numpy(scals).reshape(narray_shape)
175            narray = np.transpose(narray, axes=[2, 1, 0])
176        else:
177            narray = utils.vtk2numpy(scals).reshape(*narray_shape, comps)
178            narray = np.transpose(narray, axes=[2, 1, 0, 3])
179
180        # narray = utils.vtk2numpy(self._data.GetPointData().GetScalars()).reshape(narray_shape)
181        # narray = np.transpose(narray, axes=[2, 1, 0])
182
183        return narray
184
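A short sketch of in-place voxel editing, following the docstring above (editorial example):

```python
from vedo import Volume, dataurl

vol = Volume(dataurl + "embryo.tif")
arr = vol.tonumpy()        # numpy array giving read-write access to the voxels
arr[:] = arr * 2 + 15      # assign in place, do not rebind the name
vol.imagedata().GetPointData().GetScalars().Modified()  # notify VTK of the change
```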
185    def dimensions(self):
186        """Return the nr. of voxels in the 3 dimensions."""
187        return np.array(self._data.GetDimensions())
188
189    def scalar_range(self):
190        """Return the range of the scalar values."""
191        return np.array(self._data.GetScalarRange())
192
193    def spacing(self, s=None):
194        """Set/get the voxels size in the 3 dimensions."""
195        if s is not None:
196            self._data.SetSpacing(s)
197            return self
198        return np.array(self._data.GetSpacing())
199
200    # def pos(self, p=None): # conflicts with API of base.x()
201    #     """Same effect as calling `origin()`."""
202    #     return self.origin(p)
203
204    def origin(self, s=None):
205        """Set/get the origin of the volumetric dataset."""
206        ### supersedes base.origin()
207        ### DIFFERENT from base.origin()!
208        if s is not None:
209            self._data.SetOrigin(s)
210            return self
211        return np.array(self._data.GetOrigin())
212
213    def center(self, p=None):
214        """Set/get the center of the volumetric dataset."""
215        if p is not None:
216            self._data.SetCenter(p)
217            return self
218        return np.array(self._data.GetCenter())
219
220    def permute_axes(self, x, y, z):
221        """
222        Reorder the axes of the Volume by specifying
223        the input axes which are supposed to become the new X, Y, and Z.
224        """
225        imp = vtk.vtkImagePermute()
226        imp.SetFilteredAxes(x, y, z)
227        imp.SetInputData(self.imagedata())
228        imp.Update()
229        self._update(imp.GetOutput())
230        self.pipeline = utils.OperationNode(
231            f"permute_axes\n{(x,y,z)}", parents=[self], c="#4cc9f0"
232        )
233        return self
234
235    def resample(self, new_spacing, interpolation=1):
236        """
237        Resamples a `Volume` to be larger or smaller.
238
239        This method modifies the spacing of the input.
240        Linear interpolation is used to resample the data.
241
242        Arguments:
243            new_spacing : (list)
244                a list of 3 new spacings for the 3 axes
245            interpolation : (int)
246                0=nearest_neighbor, 1=linear, 2=cubic
247        """
248        rsp = vtk.vtkImageResample()
           rsp.SetInputData(self.imagedata())  # give the resampler its input image
249        oldsp = self.spacing()
250        for i in range(3):
251            if oldsp[i] != new_spacing[i]:
252                rsp.SetAxisOutputSpacing(i, new_spacing[i])
253        rsp.InterpolateOn()
254        rsp.SetInterpolationMode(interpolation)
255        rsp.OptimizationOn()
256        rsp.Update()
257        self._update(rsp.GetOutput())
258        self.pipeline = utils.OperationNode(
259            f"resample\n{tuple(new_spacing)}", parents=[self], c="#4cc9f0"
260        )
261        return self
262
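A minimal sketch of `resample()` (editorial example; the new spacing values are illustrative):

```python
from vedo import Volume, dataurl

vol = Volume(dataurl + "embryo.tif")
print(vol.spacing())             # current voxel size
vol.resample([2.0, 2.0, 2.0])    # resample onto a grid with spacing 2 along each axis
print(vol.dimensions())
```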
263    def interpolation(self, itype):
264        """
265        Set interpolation type.
266
267        0=nearest neighbour, 1=linear
268        """
269        self.property.SetInterpolationType(itype)
270        return self
271
272    def threshold(self, above=None, below=None, replace=None, replace_value=None):
273        """
274        Binary or continuous volume thresholding.
275        Find the voxels that contain a value above/below the input values
276        and replace them with a new value (default is 0).
277        """
278        th = vtk.vtkImageThreshold()
279        th.SetInputData(self.imagedata())
280
281        # sanity checks
282        if above is not None and below is not None:
283            if above == below:
284                return self
285            if above > below:
286                vedo.logger.warning("in volume.threshold(), above > below, skip.")
287                return self
288
289        ## cases
290        if below is not None and above is not None:
291            th.ThresholdBetween(above, below)
292
293        elif above is not None:
294            th.ThresholdByUpper(above)
295
296        elif below is not None:
297            th.ThresholdByLower(below)
298
299        ##
300        if replace is not None:
301            th.SetReplaceIn(True)
302            th.SetInValue(replace)
303        else:
304            th.SetReplaceIn(False)
305
306        if replace_value is not None:
307            th.SetReplaceOut(True)
308            th.SetOutValue(replace_value)
309        else:
310            th.SetReplaceOut(False)
311
312        th.Update()
313        self._update(th.GetOutput())
314        self.pipeline = utils.OperationNode("threshold", parents=[self], c="#4cc9f0")
315        return self
316
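A minimal sketch of `threshold()` (editorial example; the threshold value is illustrative):

```python
from vedo import Volume, dataurl

vol = Volume(dataurl + "embryo.tif")
# voxels with value >= 60 keep their value, everything below is set to 0
vol.threshold(above=60, replace_value=0)
```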
317    def crop(self, left=None, right=None, back=None, front=None, bottom=None, top=None, VOI=()):
318        """
319        Crop a `Volume` object.
320
321        Arguments:
322            left : (float)
323                fraction to crop from the left plane (negative x)
324            right : (float)
325                fraction to crop from the right plane (positive x)
326            back : (float)
327                fraction to crop from the back plane (negative y)
328            front : (float)
329                fraction to crop from the front plane (positive y)
330            bottom : (float)
331                fraction to crop from the bottom plane (negative z)
332            top : (float)
333                fraction to crop from the top plane (positive z)
334            VOI : (list)
335                extract Volume Of Interest expressed in voxel numbers
336
337        Example:
338            `vol.crop(VOI=(xmin, xmax, ymin, ymax, zmin, zmax)) # all integer voxel indices`
339        """
340        extractVOI = vtk.vtkExtractVOI()
341        extractVOI.SetInputData(self.imagedata())
342
343        if VOI:
344            extractVOI.SetVOI(VOI)
345        else:
346            d = self.imagedata().GetDimensions()
347            bx0, bx1, by0, by1, bz0, bz1 = 0, d[0]-1, 0, d[1]-1, 0, d[2]-1
348            if left is not None:   bx0 = int((d[0]-1)*left)
349            if right is not None:  bx1 = int((d[0]-1)*(1-right))
350            if back is not None:   by0 = int((d[1]-1)*back)
351            if front is not None:  by1 = int((d[1]-1)*(1-front))
352            if bottom is not None: bz0 = int((d[2]-1)*bottom)
353            if top is not None:    bz1 = int((d[2]-1)*(1-top))
354            extractVOI.SetVOI(bx0, bx1, by0, by1, bz0, bz1)
355        extractVOI.Update()
356        self._update(extractVOI.GetOutput())
357
358        self.pipeline = utils.OperationNode(
359            "crop", parents=[self], c="#4cc9f0", comment=f"dims={tuple(self.dimensions())}"
360        )
361        return self
362
363    def append(self, volumes, axis="z", preserve_extents=False):
364        """
365        Take the components from multiple inputs and merge them into one output.
366        Except for the append axis, all inputs must have the same extent.
367        All inputs must have the same number of scalar components.
368        The output has the same origin and spacing as the first input.
369        The origin and spacing of all other inputs are ignored.
370        All inputs must have the same scalar type.
371
372        Arguments:
373            axis : (int, str)
374                axis expanded to hold the multiple images
375            preserve_extents : (bool)
376                if True, the extent of the inputs is used to place
377                the image in the output. The whole extent of the output is the union of the input
378                whole extents. Any portion of the output not covered by the inputs is set to zero.
379                The origin and spacing is taken from the first input.
380
381        Example:
382            ```python
383            from vedo import Volume, dataurl
384            vol = Volume(dataurl+'embryo.tif')
385            vol.append(vol, axis='x').show().close()
386            ```
387            ![](https://vedo.embl.es/images/feats/volume_append.png)
388        """
389        ima = vtk.vtkImageAppend()
390        ima.SetInputData(self.imagedata())
391        if not utils.is_sequence(volumes):
392            volumes = [volumes]
393        for volume in volumes:
394            if isinstance(volume, vtk.vtkImageData):
395                ima.AddInputData(volume)
396            else:
397                ima.AddInputData(volume.imagedata())
398        ima.SetPreserveExtents(preserve_extents)
399        if axis == "x":
400            axis = 0
401        elif axis == "y":
402            axis = 1
403        elif axis == "z":
404            axis = 2
405        ima.SetAppendAxis(axis)
406        ima.Update()
407        self._update(ima.GetOutput())
408
409        self.pipeline = utils.OperationNode(
410            "append",
411            parents=[self, *volumes],
412            c="#4cc9f0",
413            comment=f"dims={tuple(self.dimensions())}",
414        )
415        return self
416
417    def pad(self, voxels=10, value=0):
418        """
419        Add the specified number of voxels at the `Volume` borders.
420        Voxels can be a list formatted as `[nx0, nx1, ny0, ny1, nz0, nz1]`.
421
422        Arguments:
423            voxels : (int, list)
424                number of voxels to be added (or a list of length 6)
425            value : (int)
426                intensity value (gray-scale color) of the padding
427        
428        Example:
429            ```python
430            from vedo import Volume, dataurl, show
431            iso = Volume(dataurl+'embryo.tif').isosurface()
432            vol = iso.binarize(spacing=(100, 100, 100)).pad(10)
433            vol.dilate([15,15,15])
434            show(iso, vol.isosurface(), N=2, axes=1)
435            ```
436            ![](https://vedo.embl.es/images/volumetric/volume_pad.png)
437        """
438        x0, x1, y0, y1, z0, z1 = self._data.GetExtent()
439        pf = vtk.vtkImageConstantPad()
440        pf.SetInputData(self._data)
441        pf.SetConstant(value)
442        if utils.is_sequence(voxels):
443            pf.SetOutputWholeExtent(
444                x0 - voxels[0], x1 + voxels[1], 
445                y0 - voxels[2], y1 + voxels[3], 
446                z0 - voxels[4], z1 + voxels[5], 
447            )
448        else:
449            pf.SetOutputWholeExtent(
450                x0 - voxels, x1 + voxels, 
451                y0 - voxels, y1 + voxels, 
452                z0 - voxels, z1 + voxels,
453            )
454        pf.Update()
455        self._update(pf.GetOutput())
456        self.pipeline = utils.OperationNode(
457            "pad", comment=f"{voxels} voxels", parents=[self], c="#f28482"
458        )
459        return self
460
461    def resize(self, *newdims):
462        """Increase or reduce the number of voxels of a Volume with interpolation."""
463        old_dims = np.array(self.imagedata().GetDimensions())
464        old_spac = np.array(self.imagedata().GetSpacing())
465        rsz = vtk.vtkImageResize()
466        rsz.SetResizeMethodToOutputDimensions()
467        rsz.SetInputData(self.imagedata())
468        rsz.SetOutputDimensions(newdims)
469        rsz.Update()
470        self._data = rsz.GetOutput()
471        new_spac = old_spac * old_dims / newdims  # keep aspect ratio
472        self._data.SetSpacing(new_spac)
473        self._update(self._data)
474        self.pipeline = utils.OperationNode(
475            "resize", parents=[self], c="#4cc9f0", comment=f"dims={tuple(self.dimensions())}"
476        )
477        return self
478
479    def normalize(self):
480        """Normalize the scalar components for each point."""
481        norm = vtk.vtkImageNormalize()
482        norm.SetInputData(self.imagedata())
483        norm.Update()
484        self._update(norm.GetOutput())
485        self.pipeline = utils.OperationNode("normalize", parents=[self], c="#4cc9f0")
486        return self
487
488    def mirror(self, axis="x"):
489        """
490        Mirror flip along one of the cartesian axes.
491        """
492        img = self.imagedata()
493
494        ff = vtk.vtkImageFlip()
495        ff.SetInputData(img)
496        if axis.lower() == "x":
497            ff.SetFilteredAxis(0)
498        elif axis.lower() == "y":
499            ff.SetFilteredAxis(1)
500        elif axis.lower() == "z":
501            ff.SetFilteredAxis(2)
502        else:
503            vedo.logger.error("mirror must be set to either x, y or z")
504            raise RuntimeError()
505        ff.Update()
506        self._update(ff.GetOutput())
507        self.pipeline = utils.OperationNode(f"mirror {axis}", parents=[self], c="#4cc9f0")
508        return self
509
510    def operation(self, operation, volume2=None):
511        """
512        Perform operations with `Volume` objects.
513        Keyword `volume2` can be a constant float.
514
515        Possible operations are:
516        ```
517        +, -, *, /, 1/x, sin, cos, exp, log,
518        abs, **2, sqrt, min, max, atan, atan2, median,
519        mag, dot, gradient, divergence, laplacian.
520        ```
521
522        Examples:
523            - [volume_operations.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/volume_operations.py)
524        """
525        op = operation.lower()
526        image1 = self._data
527
528        mf = None
529        if op in ["median"]:
530            mf = vtk.vtkImageMedian3D()
531            mf.SetInputData(image1)
532        elif op in ["mag"]:
533            mf = vtk.vtkImageMagnitude()
534            mf.SetInputData(image1)
535        elif op in ["dot", "dotproduct"]:
536            mf = vtk.vtkImageDotProduct()
537            mf.SetInput1Data(image1)
538            mf.SetInput2Data(volume2.imagedata())
539        elif op in ["grad", "gradient"]:
540            mf = vtk.vtkImageGradient()
541            mf.SetDimensionality(3)
542            mf.SetInputData(image1)
543        elif op in ["div", "divergence"]:
544            mf = vtk.vtkImageDivergence()
545            mf.SetInputData(image1)
546        elif op in ["laplacian"]:
547            mf = vtk.vtkImageLaplacian()
548            mf.SetDimensionality(3)
549            mf.SetInputData(image1)
550
551        if mf is not None:
552            mf.Update()
553            vol = Volume(mf.GetOutput())
554            vol.pipeline = utils.OperationNode(
555                f"operation\n{op}", parents=[self], c="#4cc9f0", shape="cylinder"
556            )
557            return vol  ###########################
558
559        mat = vtk.vtkImageMathematics()
560        mat.SetInput1Data(image1)
561
562        K = None
563
564        if utils.is_number(volume2):
565            K = volume2
566            mat.SetConstantK(K)
567            mat.SetConstantC(K)
568
569        elif volume2 is not None:  # volume2 is another Volume object
570            mat.SetInput2Data(volume2.imagedata())
571
572        # ###########################
573        if op in ["+", "add", "plus"]:
574            if K:
575                mat.SetOperationToAddConstant()
576            else:
577                mat.SetOperationToAdd()
578
579        elif op in ["-", "subtract", "minus"]:
580            if K:
581                mat.SetConstantC(-float(K))
582                mat.SetOperationToAddConstant()
583            else:
584                mat.SetOperationToSubtract()
585
586        elif op in ["*", "multiply", "times"]:
587            if K:
588                mat.SetOperationToMultiplyByK()
589            else:
590                mat.SetOperationToMultiply()
591
592        elif op in ["/", "divide"]:
593            if K:
594                mat.SetConstantK(1.0 / K)
595                mat.SetOperationToMultiplyByK()
596            else:
597                mat.SetOperationToDivide()
598
599        elif op in ["1/x", "invert"]:
600            mat.SetOperationToInvert()
601        elif op in ["sin"]:
602            mat.SetOperationToSin()
603        elif op in ["cos"]:
604            mat.SetOperationToCos()
605        elif op in ["exp"]:
606            mat.SetOperationToExp()
607        elif op in ["log"]:
608            mat.SetOperationToLog()
609        elif op in ["abs"]:
610            mat.SetOperationToAbsoluteValue()
611        elif op in ["**2", "square"]:
612            mat.SetOperationToSquare()
613        elif op in ["sqrt", "sqr"]:
614            mat.SetOperationToSquareRoot()
615        elif op in ["min"]:
616            mat.SetOperationToMin()
617        elif op in ["max"]:
618            mat.SetOperationToMax()
619        elif op in ["atan"]:
620            mat.SetOperationToATAN()
621        elif op in ["atan2"]:
622            mat.SetOperationToATAN2()
623        else:
624            vedo.logger.error(f"unknown operation {operation}")
625            raise RuntimeError()
626        mat.Update()
627
628        self._update(mat.GetOutput())
629
630        self.pipeline = utils.OperationNode(
631            f"operation\n{op}", parents=[self, volume2], shape="cylinder", c="#4cc9f0"
632        )
633        return self
634
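Two short sketches of `operation()` (editorial example): an in-place constant operation, and a unary filter that returns a new `Volume`:

```python
from vedo import Volume, dataurl

vol = Volume(dataurl + "embryo.tif")
vol.operation("+", 10)             # add a constant to every voxel, in place
grad = vol.operation("gradient")   # returns a new Volume holding the gradient components
```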
635    def frequency_pass_filter(self, low_cutoff=None, high_cutoff=None, order=1):
636        """
637        Low-pass and high-pass filtering become trivial in the frequency domain.
638        A portion of the pixels/voxels are simply masked or attenuated.
639        This function applies Butterworth low-pass and/or high-pass filters that
640        attenuate the frequency-domain image.
641
642        The gradual attenuation of the filter is important.
643        A simple high-pass filter would simply mask a set of pixels in the frequency domain,
644        but the abrupt transition would cause a ringing effect in the spatial domain.
645
646        Arguments:
647            low_cutoff : (list)
648                the lower cutoff frequencies for x, y and z (frequencies below these are attenuated)
649            high_cutoff : (list)
650                the upper cutoff frequencies for x, y and z (frequencies above these are attenuated)
651            order : (int)
652                order determines sharpness of the cutoff curve
653        """
654        # https://lorensen.github.io/VTKExamples/site/Cxx/ImageProcessing/IdealHighPass
655        fft = vtk.vtkImageFFT()
656        fft.SetInputData(self._data)
657        fft.Update()
658        out = fft.GetOutput()
659
660        if high_cutoff:
661            blp = vtk.vtkImageButterworthLowPass()
662            blp.SetInputData(out)
663            blp.SetCutOff(high_cutoff)
664            blp.SetOrder(order)
665            blp.Update()
666            out = blp.GetOutput()
667
668        if low_cutoff:
669            bhp = vtk.vtkImageButterworthHighPass()
670            bhp.SetInputData(out)
671            bhp.SetCutOff(low_cutoff)
672            bhp.SetOrder(order)
673            bhp.Update()
674            out = bhp.GetOutput()
675
676        rfft = vtk.vtkImageRFFT()
677        rfft.SetInputData(out)
678        rfft.Update()
679
680        ecomp = vtk.vtkImageExtractComponents()
681        ecomp.SetInputData(rfft.GetOutput())
682        ecomp.SetComponents(0)
683        ecomp.Update()
684        self._update(ecomp.GetOutput())
685        self.pipeline = utils.OperationNode("frequency_pass_filter", parents=[self], c="#4cc9f0")
686        return self
687
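A minimal sketch of `frequency_pass_filter()` (editorial example; the cutoff values and the order are illustrative):

```python
from vedo import Volume, dataurl

vol = Volume(dataurl + "embryo.tif")
# attenuate high frequencies (a smoothing, low-pass effect)
vol.frequency_pass_filter(high_cutoff=(0.1, 0.1, 0.1), order=2)
```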
688    def smooth_gaussian(self, sigma=(2, 2, 2), radius=None):
689        """
690        Performs a convolution of the input Volume with a Gaussian kernel.
691
692        Arguments:
693            sigma : (float, list)
694                standard deviation(s) in voxel units.
695                A list can be given to smooth in the three directions differently.
696            radius : (float, list)
697                radius factor(s) determine how far out the gaussian
698                kernel will go before being clamped to zero. A list can be given too.
699        """
700        gsf = vtk.vtkImageGaussianSmooth()
701        gsf.SetDimensionality(3)
702        gsf.SetInputData(self.imagedata())
703        if utils.is_sequence(sigma):
704            gsf.SetStandardDeviations(sigma)
705        else:
706            gsf.SetStandardDeviation(sigma)
707        if radius is not None:
708            if utils.is_sequence(radius):
709                gsf.SetRadiusFactors(radius)
710            else:
711                gsf.SetRadiusFactor(radius)
712        gsf.Update()
713        self._update(gsf.GetOutput())
714        self.pipeline = utils.OperationNode("smooth_gaussian", parents=[self], c="#4cc9f0")
715        return self
716
717    def smooth_median(self, neighbours=(2, 2, 2)):
718        """
719        Median filter that replaces each pixel with the median value
720        from a rectangular neighborhood around that pixel.
721        """
722        imgm = vtk.vtkImageMedian3D()
723        imgm.SetInputData(self.imagedata())
724        if utils.is_sequence(neighbours):
725            imgm.SetKernelSize(neighbours[0], neighbours[1], neighbours[2])
726        else:
727            imgm.SetKernelSize(neighbours, neighbours, neighbours)
728        imgm.Update()
729        self._update(imgm.GetOutput())
730        self.pipeline = utils.OperationNode("smooth_median", parents=[self], c="#4cc9f0")
731        return self
732
733    def erode(self, neighbours=(2, 2, 2)):
734        """
735        Replace a voxel with the minimum over an ellipsoidal neighborhood of voxels.
736        If `neighbours` of an axis is 1, no processing is done on that axis.
737
738        Examples:
739            - [erode_dilate.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/erode_dilate.py)
740
741                ![](https://vedo.embl.es/images/volumetric/erode_dilate.png)
742        """
743        ver = vtk.vtkImageContinuousErode3D()
744        ver.SetInputData(self._data)
745        ver.SetKernelSize(neighbours[0], neighbours[1], neighbours[2])
746        ver.Update()
747        self._update(ver.GetOutput())
748        self.pipeline = utils.OperationNode("erode", parents=[self], c="#4cc9f0")
749        return self
750
751    def dilate(self, neighbours=(2, 2, 2)):
752        """
753        Replace a voxel with the maximum over an ellipsoidal neighborhood of voxels.
754        If `neighbours` of an axis is 1, no processing is done on that axis.
755
756        Check also `erode()` and `pad()`.
757
758        Examples:
759            - [erode_dilate.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/erode_dilate.py)
760        """
761        ver = vtk.vtkImageContinuousDilate3D()
762        ver.SetInputData(self._data)
763        ver.SetKernelSize(neighbours[0], neighbours[1], neighbours[2])
764        ver.Update()
765        self._update(ver.GetOutput())
766        self.pipeline = utils.OperationNode("dilate", parents=[self], c="#4cc9f0")
767        return self
768
769    def magnitude(self):
770        """Collapses the components with the magnitude function."""
771        imgm = vtk.vtkImageMagnitude()
772        imgm.SetInputData(self.imagedata())
773        imgm.Update()
774        self._update(imgm.GetOutput())
775        self.pipeline = utils.OperationNode("magnitude", parents=[self], c="#4cc9f0")
776        return self
777
778    def topoints(self):
779        """
780        Extract all image voxels as points.
781        This function takes an input `Volume` and creates a `Points` object
782        that contains the points and the point attributes.
783
784        Examples:
785            - [vol2points.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/vol2points.py)
786        """
787        v2p = vtk.vtkImageToPoints()
788        v2p.SetInputData(self.imagedata())
789        v2p.Update()
790        mpts = vedo.Points(v2p.GetOutput())
791        mpts.pipeline = utils.OperationNode("topoints", parents=[self], c="#4cc9f0:#e9c46a")
792        return mpts
793
794    def euclidean_distance(self, anisotropy=False, max_distance=None):
795        """
796        Implementation of the Euclidean DT (Distance Transform) using Saito's algorithm.
797        The distance map produced contains the square of the Euclidean distance values.
798        The algorithm has a O(n^(D+1)) complexity over n x n x...x n images in D dimensions.
799
800        Check out also: https://en.wikipedia.org/wiki/Distance_transform
801
802        Arguments:
803            anisotropy : (bool)
804                used to define whether Spacing should be used in the
805                computation of the distances.
806            max_distance : (float)
807                any distance bigger than max_distance will not be
808                computed but set to this specified value instead.
809
810        Examples:
811            - [euclidian_dist.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/euclidian_dist.py)
812        """
813        euv = vtk.vtkImageEuclideanDistance()
814        euv.SetInputData(self._data)
815        euv.SetConsiderAnisotropy(anisotropy)
816        if max_distance is not None:
817            euv.InitializeOn()
818            euv.SetMaximumDistance(max_distance)
819        euv.SetAlgorithmToSaito()
820        euv.Update()
821        vol = Volume(euv.GetOutput())
822        vol.pipeline = utils.OperationNode("euclidean_distance", parents=[self], c="#4cc9f0")
823        return vol
824
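A short sketch of `euclidean_distance()` (editorial example), recalling that the result stores squared distances:

```python
import numpy as np
from vedo import Volume, dataurl

vol = Volume(dataurl + "embryo.tif")
dt = vol.euclidean_distance()    # Volume of squared Euclidean distances
dist = np.sqrt(dt.tonumpy())     # take the square root to get actual distances
```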
825    def correlation_with(self, vol2, dim=2):
826        """
827        Find the correlation between two volumetric data sets.
828        Keyword `dim` determines whether the correlation will be 3D, 2D or 1D.
829        The default is a 2D Correlation.
830
831        The output size will match the size of the first input.
832        The second input is considered the correlation kernel.
833        """
834        imc = vtk.vtkImageCorrelation()
835        imc.SetInput1Data(self._data)
836        imc.SetInput2Data(vol2.imagedata())
837        imc.SetDimensionality(dim)
838        imc.Update()
839        vol = Volume(imc.GetOutput())
840
841        vol.pipeline = utils.OperationNode("correlation_with", parents=[self, vol2], c="#4cc9f0")
842        return vol
843
844    def scale_voxels(self, scale=1):
845        """Scale the voxel content by factor `scale`."""
846        rsl = vtk.vtkImageReslice()
847        rsl.SetInputData(self.imagedata())
848        rsl.SetScalarScale(scale)
849        rsl.Update()
850        self._update(rsl.GetOutput())
851        self.pipeline = utils.OperationNode(
852            f"scale_voxels\nscale={scale}", parents=[self], c="#4cc9f0"
853        )
854        return self

Base class. Do not instantiate.

BaseVolume()
36    def __init__(self):
37        """Base class. Do not instantiate."""
38
39        self._data = None
40        self._mapper = None
41        self.transform = None
42        self.pipeline = None

Base class. Do not instantiate.

def clone(self):
134    def clone(self):
135        """Return a clone copy of the Volume."""
136        newimg = vtk.vtkImageData()
137        newimg.CopyStructure(self._data)
138        newimg.CopyAttributes(self._data)
139
140        newvol = Volume(newimg)
141        prop = vtk.vtkVolumeProperty()
142        prop.DeepCopy(self.GetProperty())
143        newvol.SetProperty(prop)
144        newvol.SetOrigin(self.GetOrigin())
145        newvol.SetScale(self.GetScale())
146        newvol.SetOrientation(self.GetOrientation())
147        newvol.SetPosition(self.GetPosition())
148        newvol.pipeline = utils.OperationNode("clone", parents=[self], c="#bbd0ff", shape="diamond")
149        return newvol

Return a clone copy of the Volume.

def imagedata(self):
151    def imagedata(self):
152        """Return the underlying `vtkImagaData` object."""
153        return self._data

Return the underlying vtkImagaData object.

def tonumpy(self):
155    def tonumpy(self):
156        """
157        Get read-write access to voxels of a Volume object as a numpy array.
158
159        When you set values in the output image, you don't want numpy to reallocate the array
160        but instead set values in the existing array, so use the [:] operator.
161
162        Example:
163            `arr[:] = arr*2 + 15`
164
165        If the array is modified add a call to:
166        `volume.imagedata().GetPointData().GetScalars().Modified()`
167        when all your modifications are completed.
168        """
169        narray_shape = tuple(reversed(self._data.GetDimensions()))
170
171        scals = self._data.GetPointData().GetScalars()
172        comps = scals.GetNumberOfComponents()
173        if comps == 1:
174            narray = utils.vtk2numpy(scals).reshape(narray_shape)
175            narray = np.transpose(narray, axes=[2, 1, 0])
176        else:
177            narray = utils.vtk2numpy(scals).reshape(*narray_shape, comps)
178            narray = np.transpose(narray, axes=[2, 1, 0, 3])
179
180        # narray = utils.vtk2numpy(self._data.GetPointData().GetScalars()).reshape(narray_shape)
181        # narray = np.transpose(narray, axes=[2, 1, 0])
182
183        return narray

Get read-write access to voxels of a Volume object as a numpy array.

When you set values in the output image, you don't want numpy to reallocate the array but instead set values in the existing array, so use the [:] operator.

Example:

arr[:] = arr*2 + 15

If the array is modified add a call to: volume.imagedata().GetPointData().GetScalars().Modified() when all your modifications are completed.

def dimensions(self):
185    def dimensions(self):
186        """Return the nr. of voxels in the 3 dimensions."""
187        return np.array(self._data.GetDimensions())

Return the nr. of voxels in the 3 dimensions.

def scalar_range(self):
189    def scalar_range(self):
190        """Return the range of the scalar values."""
191        return np.array(self._data.GetScalarRange())

Return the range of the scalar values.

def spacing(self, s=None):
193    def spacing(self, s=None):
194        """Set/get the voxels size in the 3 dimensions."""
195        if s is not None:
196            self._data.SetSpacing(s)
197            return self
198        return np.array(self._data.GetSpacing())

Set/get the voxels size in the 3 dimensions.

def origin(self, s=None):
204    def origin(self, s=None):
205        """Set/get the origin of the volumetric dataset."""
206        ### supersedes base.origin()
207        ### DIFFERENT from base.origin()!
208        if s is not None:
209            self._data.SetOrigin(s)
210            return self
211        return np.array(self._data.GetOrigin())

Set/get the origin of the volumetric dataset.

def center(self, p=None):
213    def center(self, p=None):
214        """Set/get the center of the volumetric dataset."""
215        if p is not None:
216            self._data.SetCenter(p)
217            return self
218        return np.array(self._data.GetCenter())

Set/get the center of the volumetric dataset.

def permute_axes(self, x, y, z):
220    def permute_axes(self, x, y, z):
221        """
222        Reorder the axes of the Volume by specifying
223        the input axes which are supposed to become the new X, Y, and Z.
224        """
225        imp = vtk.vtkImagePermute()
226        imp.SetFilteredAxes(x, y, z)
227        imp.SetInputData(self.imagedata())
228        imp.Update()
229        self._update(imp.GetOutput())
230        self.pipeline = utils.OperationNode(
231            f"permute_axes\n{(x,y,z)}", parents=[self], c="#4cc9f0"
232        )
233        return self

Reorder the axes of the Volume by specifying the input axes which are supposed to become the new X, Y, and Z.

def resample(self, new_spacing, interpolation=1):
235    def resample(self, new_spacing, interpolation=1):
236        """
237        Resamples a `Volume` to be larger or smaller.
238
239        This method modifies the spacing of the input.
240        Linear interpolation is used to resample the data.
241
242        Arguments:
243            new_spacing : (list)
244                a list of 3 new spacings for the 3 axes
245            interpolation : (int)
246                0=nearest_neighbor, 1=linear, 2=cubic
247        """
248        rsp = vtk.vtkImageResample()
249        oldsp = self.spacing()
250        for i in range(3):
251            if oldsp[i] != new_spacing[i]:
252                rsp.SetAxisOutputSpacing(i, new_spacing[i])
253        rsp.InterpolateOn()
254        rsp.SetInterpolationMode(interpolation)
255        rsp.OptimizationOn()
256        rsp.Update()
257        self._update(rsp.GetOutput())
258        self.pipeline = utils.OperationNode(
259            f"resample\n{tuple(new_spacing)}", parents=[self], c="#4cc9f0"
260        )
261        return self

Resamples a Volume to be larger or smaller.

This method modifies the spacing of the input. Linear interpolation is used to resample the data.

Arguments:
  • new_spacing : (list) a list of 3 new spacings for the 3 axes
  • interpolation : (int) 0=nearest_neighbor, 1=linear, 2=cubic
def interpolation(self, itype):
263    def interpolation(self, itype):
264        """
265        Set interpolation type.
266
267        0=nearest neighbour, 1=linear
268        """
269        self.property.SetInterpolationType(itype)
270        return self

Set interpolation type.

0=nearest neighbour, 1=linear

def threshold(self, above=None, below=None, replace=None, replace_value=None):
272    def threshold(self, above=None, below=None, replace=None, replace_value=None):
273        """
274        Binary or continuous volume thresholding.
275        Find the voxels that contain a value above/below the input values
276        and replace them with a new value (default is 0).
277        """
278        th = vtk.vtkImageThreshold()
279        th.SetInputData(self.imagedata())
280
281        # sanity checks
282        if above is not None and below is not None:
283            if above == below:
284                return self
285            if above > below:
286                vedo.logger.warning("in volume.threshold(), above > below, skip.")
287                return self
288
289        ## cases
290        if below is not None and above is not None:
291            th.ThresholdBetween(above, below)
292
293        elif above is not None:
294            th.ThresholdByUpper(above)
295
296        elif below is not None:
297            th.ThresholdByLower(below)
298
299        ##
300        if replace is not None:
301            th.SetReplaceIn(True)
302            th.SetInValue(replace)
303        else:
304            th.SetReplaceIn(False)
305
306        if replace_value is not None:
307            th.SetReplaceOut(True)
308            th.SetOutValue(replace_value)
309        else:
310            th.SetReplaceOut(False)
311
312        th.Update()
313        self._update(th.GetOutput())
314        self.pipeline = utils.OperationNode("threshold", parents=[self], c="#4cc9f0")
315        return self

Binary or continuous volume thresholding. Find the voxels that contain a value above/below the input values and replace them with a new value (default is 0).

def crop( self, left=None, right=None, back=None, front=None, bottom=None, top=None, VOI=()):
317    def crop(self, left=None, right=None, back=None, front=None, bottom=None, top=None, VOI=()):
318        """
319        Crop a `Volume` object.
320
321        Arguments:
322            left : (float)
323                fraction to crop from the left plane (negative x)
324            right : (float)
325                fraction to crop from the right plane (positive x)
326            back : (float)
327                fraction to crop from the back plane (negative y)
328            front : (float)
329                fraction to crop from the front plane (positive y)
330            bottom : (float)
331                fraction to crop from the bottom plane (negative z)
332            top : (float)
333                fraction to crop from the top plane (positive z)
334            VOI : (list)
335                extract Volume Of Interest expressed in voxel numbers
336
337        Example:
338            `vol.crop(VOI=(xmin, xmax, ymin, ymax, zmin, zmax)) # all integers nrs`
339        """
340        extractVOI = vtk.vtkExtractVOI()
341        extractVOI.SetInputData(self.imagedata())
342
343        if VOI:
344            extractVOI.SetVOI(VOI)
345        else:
346            d = self.imagedata().GetDimensions()
347            bx0, bx1, by0, by1, bz0, bz1 = 0, d[0]-1, 0, d[1]-1, 0, d[2]-1
348            if left is not None:   bx0 = int((d[0]-1)*left)
349            if right is not None:  bx1 = int((d[0]-1)*(1-right))
350            if back is not None:   by0 = int((d[1]-1)*back)
351            if front is not None:  by1 = int((d[1]-1)*(1-front))
352            if bottom is not None: bz0 = int((d[2]-1)*bottom)
353            if top is not None:    bz1 = int((d[2]-1)*(1-top))
354            extractVOI.SetVOI(bx0, bx1, by0, by1, bz0, bz1)
355        extractVOI.Update()
356        self._update(extractVOI.GetOutput())
357
358        self.pipeline = utils.OperationNode(
359            "crop", parents=[self], c="#4cc9f0", comment=f"dims={tuple(self.dimensions())}"
360        )
361        return self

Crop a Volume object.

Arguments:
  • left : (float) fraction to crop from the left plane (negative x)
  • right : (float) fraction to crop from the right plane (positive x)
  • back : (float) fraction to crop from the back plane (negative y)
  • front : (float) fraction to crop from the front plane (positive y)
  • bottom : (float) fraction to crop from the bottom plane (negative z)
  • top : (float) fraction to crop from the top plane (positive z)
  • VOI : (list) extract Volume Of Interest expressed in voxel numbers
Example:

vol.crop(VOI=(xmin, xmax, ymin, ymax, zmin, zmax)) # all integers nrs

def append(self, volumes, axis='z', preserve_extents=False):
363    def append(self, volumes, axis="z", preserve_extents=False):
364        """
365        Take the components from multiple inputs and merges them into one output.
366        Except for the append axis, all inputs must have the same extent.
367        All inputs must have the same number of scalar components.
368        The output has the same origin and spacing as the first input.
369        The origin and spacing of all other inputs are ignored.
370        All inputs must have the same scalar type.
371
372        Arguments:
373            axis : (int, str)
374                axis expanded to hold the multiple images
375            preserve_extents : (bool)
376                if True, the extent of the inputs is used to place
377                the image in the output. The whole extent of the output is the union of the input
378                whole extents. Any portion of the output not covered by the inputs is set to zero.
379                The origin and spacing is taken from the first input.
380
381        Example:
382            ```python
383            from vedo import Volume, dataurl
384            vol = Volume(dataurl+'embryo.tif')
385            vol.append(vol, axis='x').show().close()
386            ```
387            ![](https://vedo.embl.es/images/feats/volume_append.png)
388        """
389        ima = vtk.vtkImageAppend()
390        ima.SetInputData(self.imagedata())
391        if not utils.is_sequence(volumes):
392            volumes = [volumes]
393        for volume in volumes:
394            if isinstance(volume, vtk.vtkImageData):
395                ima.AddInputData(volume)
396            else:
397                ima.AddInputData(volume.imagedata())
398        ima.SetPreserveExtents(preserve_extents)
399        if axis == "x":
400            axis = 0
401        elif axis == "y":
402            axis = 1
403        elif axis == "z":
404            axis = 2
405        ima.SetAppendAxis(axis)
406        ima.Update()
407        self._update(ima.GetOutput())
408
409        self.pipeline = utils.OperationNode(
410            "append",
411            parents=[self, *volumes],
412            c="#4cc9f0",
413            comment=f"dims={tuple(self.dimensions())}",
414        )
415        return self

Take the components from multiple inputs and merge them into one output. Except for the append axis, all inputs must have the same extent. All inputs must have the same number of scalar components. The output has the same origin and spacing as the first input. The origin and spacing of all other inputs are ignored. All inputs must have the same scalar type.

Arguments:
  • axis : (int, str) axis expanded to hold the multiple images
  • preserve_extents : (bool) if True, the extent of the inputs is used to place the image in the output. The whole extent of the output is the union of the input whole extents. Any portion of the output not covered by the inputs is set to zero. The origin and spacing are taken from the first input.
Example:
from vedo import Volume, dataurl
vol = Volume(dataurl+'embryo.tif')
vol.append(vol, axis='x').show().close()

def pad(self, voxels=10, value=0):
417    def pad(self, voxels=10, value=0):
418        """
419        Add the specified number of voxels at the `Volume` borders.
420        Voxels can be a list formatted as `[nx0, nx1, ny0, ny1, nz0, nz1]`.
421
422        Arguments:
423            voxels : (int, list)
424                number of voxels to be added (or a list of length 6)
425            value : (int)
426                intensity value (gray-scale color) of the padding
427        
428        Example:
429            ```python
430            from vedo import Volume, dataurl, show
431            iso = Volume(dataurl+'embryo.tif').isosurface()
432            vol = iso.binarize(spacing=(100, 100, 100)).pad(10)
433            vol.dilate([15,15,15])
434            show(iso, vol.isosurface(), N=2, axes=1)
435            ```
436            ![](https://vedo.embl.es/images/volumetric/volume_pad.png)
437        """
438        x0, x1, y0, y1, z0, z1 = self._data.GetExtent()
439        pf = vtk.vtkImageConstantPad()
440        pf.SetInputData(self._data)
441        pf.SetConstant(value)
442        if utils.is_sequence(voxels):
443            pf.SetOutputWholeExtent(
444                x0 - voxels[0], x1 + voxels[1], 
445                y0 - voxels[2], y1 + voxels[3], 
446                z0 - voxels[4], z1 + voxels[5], 
447            )
448        else:
449            pf.SetOutputWholeExtent(
450                x0 - voxels, x1 + voxels, 
451                y0 - voxels, y1 + voxels, 
452                z0 - voxels, z1 + voxels,
453            )
454        pf.Update()
455        self._update(pf.GetOutput())
456        self.pipeline = utils.OperationNode(
457            "pad", comment=f"{voxels} voxels", parents=[self], c="#f28482"
458        )
459        return self

Add the specified number of voxels at the Volume borders. Voxels can be a list formatted as [nx0, nx1, ny0, ny1, nz0, nz1].

Arguments:
  • voxels : (int, list) number of voxels to be added (or a list of length 6)
  • value : (int) intensity value (gray-scale color) of the padding
Example:
from vedo import Volume, dataurl, show
iso = Volume(dataurl+'embryo.tif').isosurface()
vol = iso.binarize(spacing=(100, 100, 100)).pad(10)
vol.dilate([15,15,15])
show(iso, vol.isosurface(), N=2, axes=1)

def resize(self, *newdims):
461    def resize(self, *newdims):
462        """Increase or reduce the number of voxels of a Volume with interpolation."""
463        old_dims = np.array(self.imagedata().GetDimensions())
464        old_spac = np.array(self.imagedata().GetSpacing())
465        rsz = vtk.vtkImageResize()
466        rsz.SetResizeMethodToOutputDimensions()
467        rsz.SetInputData(self.imagedata())
468        rsz.SetOutputDimensions(newdims)
469        rsz.Update()
470        self._data = rsz.GetOutput()
471        new_spac = old_spac * old_dims / newdims  # keep aspect ratio
472        self._data.SetSpacing(new_spac)
473        self._update(self._data)
474        self.pipeline = utils.OperationNode(
475            "resize", parents=[self], c="#4cc9f0", comment=f"dims={tuple(self.dimensions())}"
476        )
477        return self

Increase or reduce the number of voxels of a Volume with interpolation.
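A short sketch, assuming the embryo.tif sample dataset: halve the voxel count along each axis and check that the spacing is adjusted to keep the physical extent:

```python
from vedo import Volume, dataurl

vol = Volume(dataurl + "embryo.tif")
nx, ny, nz = vol.dimensions()
vol.resize(nx // 2, ny // 2, nz // 2)                   # downsample with interpolation
print(vol.dimensions(), vol.imagedata().GetSpacing())   # spacing roughly doubles
```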

def normalize(self):
479    def normalize(self):
480        """Normalize the scalar components for each point."""
481        norm = vtk.vtkImageNormalize()
482        norm.SetInputData(self.imagedata())
483        norm.Update()
484        self._update(norm.GetOutput())
485        self.pipeline = utils.OperationNode("normalize", parents=[self], c="#4cc9f0")
486        return self

Normalize the scalar components for each point.

def mirror(self, axis='x'):
488    def mirror(self, axis="x"):
489        """
490        Mirror flip along one of the cartesian axes.
491        """
492        img = self.imagedata()
493
494        ff = vtk.vtkImageFlip()
495        ff.SetInputData(img)
496        if axis.lower() == "x":
497            ff.SetFilteredAxis(0)
498        elif axis.lower() == "y":
499            ff.SetFilteredAxis(1)
500        elif axis.lower() == "z":
501            ff.SetFilteredAxis(2)
502        else:
503            vedo.logger.error("mirror axis must be set to either x, y or z")
504            raise RuntimeError()
505        ff.Update()
506        self._update(ff.GetOutput())
507        self.pipeline = utils.OperationNode(f"mirror {axis}", parents=[self], c="#4cc9f0")
508        return self

Mirror flip along one of the cartesian axes.
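A minimal usage sketch; the choice of the y axis and of the embryo.tif dataset is just for illustration:

```python
from vedo import Volume, dataurl, show

vol1 = Volume(dataurl + "embryo.tif")
vol2 = Volume(dataurl + "embryo.tif").mirror(axis="y")  # flip along y
show(vol1, vol2, N=2, axes=1).close()
```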

def operation(self, operation, volume2=None):
510    def operation(self, operation, volume2=None):
511        """
512        Perform operations with `Volume` objects.
513        Keyword `volume2` can be a constant float.
514
515        Possible operations are:
516        ```
517        +, -, /, 1/x, sin, cos, exp, log,
518        abs, **2, sqrt, min, max, atan, atan2, median,
519        mag, dot, gradient, divergence, laplacian.
520        ```
521
522        Examples:
523            - [volume_operations.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/volume_operations.py)
524        """
525        op = operation.lower()
526        image1 = self._data
527
528        mf = None
529        if op in ["median"]:
530            mf = vtk.vtkImageMedian3D()
531            mf.SetInputData(image1)
532        elif op in ["mag"]:
533            mf = vtk.vtkImageMagnitude()
534            mf.SetInputData(image1)
535        elif op in ["dot", "dotproduct"]:
536            mf = vtk.vtkImageDotProduct()
537            mf.SetInput1Data(image1)
538            mf.SetInput2Data(volume2.imagedata())
539        elif op in ["grad", "gradient"]:
540            mf = vtk.vtkImageGradient()
541            mf.SetDimensionality(3)
542            mf.SetInputData(image1)
543        elif op in ["div", "divergence"]:
544            mf = vtk.vtkImageDivergence()
545            mf.SetInputData(image1)
546        elif op in ["laplacian"]:
547            mf = vtk.vtkImageLaplacian()
548            mf.SetDimensionality(3)
549            mf.SetInputData(image1)
550
551        if mf is not None:
552            mf.Update()
553            vol = Volume(mf.GetOutput())
554            vol.pipeline = utils.OperationNode(
555                f"operation\n{op}", parents=[self], c="#4cc9f0", shape="cylinder"
556            )
557            return vol  ###########################
558
559        mat = vtk.vtkImageMathematics()
560        mat.SetInput1Data(image1)
561
562        K = None
563
564        if utils.is_number(volume2):
565            K = volume2
566            mat.SetConstantK(K)
567            mat.SetConstantC(K)
568
569        elif volume2 is not None:  # volume2 is a Volume object
570            mat.SetInput2Data(volume2.imagedata())
571
572        # ###########################
573        if op in ["+", "add", "plus"]:
574            if K:
575                mat.SetOperationToAddConstant()
576            else:
577                mat.SetOperationToAdd()
578
579        elif op in ["-", "subtract", "minus"]:
580            if K:
581                mat.SetConstantC(-float(K))
582                mat.SetOperationToAddConstant()
583            else:
584                mat.SetOperationToSubtract()
585
586        elif op in ["*", "multiply", "times"]:
587            if K:
588                mat.SetOperationToMultiplyByK()
589            else:
590                mat.SetOperationToMultiply()
591
592        elif op in ["/", "divide"]:
593            if K:
594                mat.SetConstantK(1.0 / K)
595                mat.SetOperationToMultiplyByK()
596            else:
597                mat.SetOperationToDivide()
598
599        elif op in ["1/x", "invert"]:
600            mat.SetOperationToInvert()
601        elif op in ["sin"]:
602            mat.SetOperationToSin()
603        elif op in ["cos"]:
604            mat.SetOperationToCos()
605        elif op in ["exp"]:
606            mat.SetOperationToExp()
607        elif op in ["log"]:
608            mat.SetOperationToLog()
609        elif op in ["abs"]:
610            mat.SetOperationToAbsoluteValue()
611        elif op in ["**2", "square"]:
612            mat.SetOperationToSquare()
613        elif op in ["sqrt", "sqr"]:
614            mat.SetOperationToSquareRoot()
615        elif op in ["min"]:
616            mat.SetOperationToMin()
617        elif op in ["max"]:
618            mat.SetOperationToMax()
619        elif op in ["atan"]:
620            mat.SetOperationToATAN()
621        elif op in ["atan2"]:
622            mat.SetOperationToATAN2()
623        else:
624            vedo.logger.error(f"unknown operation {operation}")
625            raise RuntimeError()
626        mat.Update()
627
628        self._update(mat.GetOutput())
629
630        self.pipeline = utils.OperationNode(
631            f"operation\n{op}", parents=[self, volume2], shape="cylinder", c="#4cc9f0"
632        )
633        return self

Perform operations with Volume objects. Keyword volume2 can be a constant float.

Possible operations are:

+, -, /, 1/x, sin, cos, exp, log,
abs, **2, sqrt, min, max, atan, atan2, median,
mag, dot, gradient, divergence, laplacian.
Examples:
  • volume_operations.py: https://github.com/marcomusy/vedo/tree/master/examples/volumetric/volume_operations.py
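A small sketch of the two calling styles, assuming the embryo.tif dataset: per the code above, `median`, `mag`, `dot`, `gradient`, `divergence` and `laplacian` return a new `Volume`, while the arithmetic operations modify the volume in place:

```python
from vedo import Volume, dataurl

vol = Volume(dataurl + "embryo.tif")
vol.operation("+", 10)            # in place: add the constant 10 to every voxel
grad = vol.operation("gradient")  # returns a new Volume with 3 components
mag  = grad.operation("mag")      # collapse the components to their magnitude
```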
def frequency_pass_filter(self, low_cutoff=None, high_cutoff=None, order=1):
635    def frequency_pass_filter(self, low_cutoff=None, high_cutoff=None, order=1):
636        """
637        Low-pass and high-pass filtering become trivial in the frequency domain.
638        A portion of the pixels/voxels are simply masked or attenuated.
639        This function applies low-pass and/or high-pass Butterworth filters that
640        attenuate the frequency-domain image.
641
642        The gradual attenuation of the filter is important.
643        A simple high-pass filter would simply mask a set of pixels in the frequency domain,
644        but the abrupt transition would cause a ringing effect in the spatial domain.
645
646        Arguments:
647            low_cutoff : (list)
648                the cutoff frequencies for x, y and z
649            high_cutoff : (list)
650                the cutoff frequencies for x, y and z
651            order : (int)
652                order determines sharpness of the cutoff curve
653        """
654        # https://lorensen.github.io/VTKExamples/site/Cxx/ImageProcessing/IdealHighPass
655        fft = vtk.vtkImageFFT()
656        fft.SetInputData(self._data)
657        fft.Update()
658        out = fft.GetOutput()
659
660        if high_cutoff:
661            blp = vtk.vtkImageButterworthLowPass()
662            blp.SetInputData(out)
663            blp.SetCutOff(high_cutoff)
664            blp.SetOrder(order)
665            blp.Update()
666            out = blp.GetOutput()
667
668        if low_cutoff:
669            bhp = vtk.vtkImageButterworthHighPass()
670            bhp.SetInputData(out)
671            bhp.SetCutOff(low_cutoff)
672            bhp.SetOrder(order)
673            bhp.Update()
674            out = bhp.GetOutput()
675
676        rfft = vtk.vtkImageRFFT()
677        rfft.SetInputData(out)
678        rfft.Update()
679
680        ecomp = vtk.vtkImageExtractComponents()
681        ecomp.SetInputData(rfft.GetOutput())
682        ecomp.SetComponents(0)
683        ecomp.Update()
684        self._update(ecomp.GetOutput())
685        self.pipeline = utils.OperationNode("frequency_pass_filter", parents=[self], c="#4cc9f0")
686        return self

Low-pass and high-pass filtering become trivial in the frequency domain. A portion of the pixels/voxels are simply masked or attenuated. This function applies low-pass and/or high-pass Butterworth filters that attenuate the frequency-domain image.

The gradual attenuation of the filter is important. A simple high-pass filter would simply mask a set of pixels in the frequency domain, but the abrupt transition would cause a ringing effect in the spatial domain.

Arguments:
  • low_cutoff : (list) the cutoff frequencies for x, y and z
  • high_cutoff : (list) the cutoff frequencies for x, y and z
  • order : (int) order determines sharpness of the cutoff curve
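A hedged sketch, assuming the embryo.tif dataset; the cutoff values and filter order are arbitrary illustration values (note that, per the code above, `low_cutoff` drives the high-pass stage and `high_cutoff` the low-pass stage):

```python
from vedo import Volume, dataurl, show

original = Volume(dataurl + "embryo.tif")
filtered = Volume(dataurl + "embryo.tif").frequency_pass_filter(
    low_cutoff=(0.02, 0.02, 0.02), order=3
)
show(original, filtered, N=2, axes=1).close()
```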
def smooth_gaussian(self, sigma=(2, 2, 2), radius=None):
688    def smooth_gaussian(self, sigma=(2, 2, 2), radius=None):
689        """
690        Performs a convolution of the input Volume with a gaussian.
691
692        Arguments:
693            sigma : (float, list)
694                standard deviation(s) in voxel units.
695                A list can be given to smooth in the three directions differently.
696            radius : (float, list)
697                radius factor(s) determine how far out the gaussian
698                kernel will go before being clamped to zero. A list can be given too.
699        """
700        gsf = vtk.vtkImageGaussianSmooth()
701        gsf.SetDimensionality(3)
702        gsf.SetInputData(self.imagedata())
703        if utils.is_sequence(sigma):
704            gsf.SetStandardDeviations(sigma)
705        else:
706            gsf.SetStandardDeviation(sigma)
707        if radius is not None:
708            if utils.is_sequence(radius):
709                gsf.SetRadiusFactors(radius)
710            else:
711                gsf.SetRadiusFactor(radius)
712        gsf.Update()
713        self._update(gsf.GetOutput())
714        self.pipeline = utils.OperationNode("smooth_gaussian", parents=[self], c="#4cc9f0")
715        return self

Performs a convolution of the input Volume with a gaussian.

Arguments:
  • sigma : (float, list) standard deviation(s) in voxel units. A list can be given to smooth in the three directions differently.
  • radius : (float, list) radius factor(s) determine how far out the gaussian kernel will go before being clamped to zero. A list can be given too.
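A minimal sketch of anisotropic smoothing, assuming the embryo.tif dataset; the sigma values are illustrative:

```python
from vedo import Volume, dataurl, show

original = Volume(dataurl + "embryo.tif")
smoothed = Volume(dataurl + "embryo.tif").smooth_gaussian(sigma=(1, 1, 4))  # stronger along z
show(original.isosurface(), smoothed.isosurface(), N=2, axes=1).close()
```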
def smooth_median(self, neighbours=(2, 2, 2)):
717    def smooth_median(self, neighbours=(2, 2, 2)):
718        """
719        Median filter that replaces each voxel with the median value
720        from a rectangular neighborhood around that voxel.
721        """
722        imgm = vtk.vtkImageMedian3D()
723        imgm.SetInputData(self.imagedata())
724        if utils.is_sequence(neighbours):
725            imgm.SetKernelSize(neighbours[0], neighbours[1], neighbours[2])
726        else:
727            imgm.SetKernelSize(neighbours, neighbours, neighbours)
728        imgm.Update()
729        self._update(imgm.GetOutput())
730        self.pipeline = utils.OperationNode("smooth_median", parents=[self], c="#4cc9f0")
731        return self

Median filter that replaces each voxel with the median value from a rectangular neighborhood around that voxel.
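A short usage sketch on a synthetic noisy volume (median filtering is a common way to suppress isolated salt-and-pepper voxels); the array size and kernel are illustrative:

```python
import numpy as np
from vedo import Volume

noisy = np.random.rand(50, 50, 50).astype(np.float32)
vol = Volume(noisy).smooth_median(neighbours=(3, 3, 3))
```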

def erode(self, neighbours=(2, 2, 2)):
733    def erode(self, neighbours=(2, 2, 2)):
734        """
735        Replace a voxel with the minimum over an ellipsoidal neighborhood of voxels.
736        If the `neighbours` value for an axis is 1, no processing is done along that axis.
737
738        Examples:
739            - [erode_dilate.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/erode_dilate.py)
740
741                ![](https://vedo.embl.es/images/volumetric/erode_dilate.png)
742        """
743        ver = vtk.vtkImageContinuousErode3D()
744        ver.SetInputData(self._data)
745        ver.SetKernelSize(neighbours[0], neighbours[1], neighbours[2])
746        ver.Update()
747        self._update(ver.GetOutput())
748        self.pipeline = utils.OperationNode("erode", parents=[self], c="#4cc9f0")
749        return self

Replace a voxel with the minimum over an ellipsoidal neighborhood of voxels. If the neighbours value for an axis is 1, no processing is done along that axis.

Examples:
  • erode_dilate.py: https://github.com/marcomusy/vedo/tree/master/examples/volumetric/erode_dilate.py
def dilate(self, neighbours=(2, 2, 2)):
751    def dilate(self, neighbours=(2, 2, 2)):
752        """
753        Replace a voxel with the maximum over an ellipsoidal neighborhood of voxels.
754        If the `neighbours` value for an axis is 1, no processing is done along that axis.
755
756        Check also `erode()` and `pad()`.
757
758        Examples:
759            - [erode_dilate.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/erode_dilate.py)
760        """
761        ver = vtk.vtkImageContinuousDilate3D()
762        ver.SetInputData(self._data)
763        ver.SetKernelSize(neighbours[0], neighbours[1], neighbours[2])
764        ver.Update()
765        self._update(ver.GetOutput())
766        self.pipeline = utils.OperationNode("dilate", parents=[self], c="#4cc9f0")
767        return self

Replace a voxel with the maximum over an ellipsoidal neighborhood of voxels. If the neighbours value for an axis is 1, no processing is done along that axis.

Check also erode() and pad().

Examples:
  • erode_dilate.py: https://github.com/marcomusy/vedo/tree/master/examples/volumetric/erode_dilate.py
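A hedged sketch combining `erode()` and `dilate()` with the same kernel, which amounts to a morphological opening; it reuses the binarization pattern from the `pad()` example above, and the kernel size is illustrative:

```python
from vedo import Volume, dataurl, show

iso = Volume(dataurl + "embryo.tif").isosurface()
vol = iso.binarize(spacing=(100, 100, 100))
vol.erode(neighbours=(5, 5, 5)).dilate(neighbours=(5, 5, 5))  # "opening"
show(iso, vol.isosurface(), N=2, axes=1).close()
```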
def magnitude(self):
769    def magnitude(self):
770        """Collapses the components with the magnitude function."""
771        imgm = vtk.vtkImageMagnitude()
772        imgm.SetInputData(self.imagedata())
773        imgm.Update()
774        self._update(imgm.GetOutput())
775        self.pipeline = utils.OperationNode("magnitude", parents=[self], c="#4cc9f0")
776        return self

Collapses the components with the magnitude function.

def topoints(self):
778    def topoints(self):
779        """
780        Extract all image voxels as points.
781        This function takes an input `Volume` and creates a `Points` object
782        that contains the points and the point attributes.
783
784        Examples:
785            - [vol2points.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/vol2points.py)
786        """
787        v2p = vtk.vtkImageToPoints()
788        v2p.SetInputData(self.imagedata())
789        v2p.Update()
790        mpts = vedo.Points(v2p.GetOutput())
791        mpts.pipeline = utils.OperationNode("topoints", parents=[self], c="#4cc9f0:#e9c46a")
792        return mpts

Extract all image voxels as points. This function takes an input Volume and creates a Points object that contains the points and the point attributes.

Examples:
  • vol2points.py: https://github.com/marcomusy/vedo/tree/master/examples/volumetric/vol2points.py
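A minimal sketch, assuming the embryo.tif dataset:

```python
from vedo import Volume, dataurl, show

pts = Volume(dataurl + "embryo.tif").topoints()
print(pts)                   # a vedo Points object carrying the voxel scalars
show(pts, axes=1).close()
```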
def euclidean_distance(self, anisotropy=False, max_distance=None):
794    def euclidean_distance(self, anisotropy=False, max_distance=None):
795        """
796        Implementation of the Euclidean DT (Distance Transform) using Saito's algorithm.
797        The distance map produced contains the square of the Euclidean distance values.
798        The algorithm has an O(n^(D+1)) complexity over n x n x...x n images in D dimensions.
799
800        Check out also: https://en.wikipedia.org/wiki/Distance_transform
801
802        Arguments:
803            anisotropy : (bool)
804                used to define whether the voxel spacing should be used in the
805                computation of the distances.
806            max_distance : (float)
807                any distance bigger than max_distance will not be
808                computed but set to this specified value instead.
809
810        Examples:
811            - [euclidian_dist.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/euclidian_dist.py)
812        """
813        euv = vtk.vtkImageEuclideanDistance()
814        euv.SetInputData(self._data)
815        euv.SetConsiderAnisotropy(anisotropy)
816        if max_distance is not None:
817            euv.InitializeOn()
818            euv.SetMaximumDistance(max_distance)
819        euv.SetAlgorithmToSaito()
820        euv.Update()
821        vol = Volume(euv.GetOutput())
822        vol.pipeline = utils.OperationNode("euclidean_distance", parents=[self], c="#4cc9f0")
823        return vol

Implementation of the Euclidean DT (Distance Transform) using Saito's algorithm. The distance map produced contains the square of the Euclidean distance values. The algorithm has an O(n^(D+1)) complexity over n x n x...x n images in D dimensions.

Check out also: https://en.wikipedia.org/wiki/Distance_transform

Arguments:
  • anisotropy : (bool) used to define whether the voxel spacing should be used in the computation of the distances.
  • max_distance : (float) any distance bigger than max_distance will not be computed but set to this specified value instead.
Examples:
  • euclidian_dist.py: https://github.com/marcomusy/vedo/tree/master/examples/volumetric/euclidian_dist.py
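A hedged sketch on a binarized volume (same binarization pattern as above); `max_distance=100` is an arbitrary cap, and remember that the output stores squared distances:

```python
from vedo import Volume, dataurl, show

binary = Volume(dataurl + "embryo.tif").isosurface().binarize(spacing=(100, 100, 100))
dist = binary.euclidean_distance(anisotropy=True, max_distance=100)
show(dist, axes=1).close()
```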
def correlation_with(self, vol2, dim=2):
825    def correlation_with(self, vol2, dim=2):
826        """
827        Find the correlation between two volumetric data sets.
828        Keyword `dim` determines whether the correlation will be 3D, 2D or 1D.
829        The default is a 2D Correlation.
830
831        The output size will match the size of the first input.
832        The second input is considered the correlation kernel.
833        """
834        imc = vtk.vtkImageCorrelation()
835        imc.SetInput1Data(self._data)
836        imc.SetInput2Data(vol2.imagedata())
837        imc.SetDimensionality(dim)
838        imc.Update()
839        vol = Volume(imc.GetOutput())
840
841        vol.pipeline = utils.OperationNode("correlation_with", parents=[self, vol2], c="#4cc9f0")
842        return vol

Find the correlation between two volumetric data sets. Keyword dim determines whether the correlation will be 3D, 2D or 1D. The default is a 2D Correlation.

The output size will match the size of the first input. The second input is considered the correlation kernel.
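A self-contained sketch on synthetic data, so the spatial-domain correlation stays cheap; the array shapes are illustrative:

```python
import numpy as np
from vedo import Volume

data = np.zeros((60, 60, 60), dtype=np.float32)
data[20:40, 20:40, 20:40] = 1.0                  # a solid box
kernel = np.ones((5, 5, 5), dtype=np.float32)    # small box-shaped kernel

corr = Volume(data).correlation_with(Volume(kernel), dim=3)
print(corr.dimensions())   # matches the size of the first input
```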

def scale_voxels(self, scale=1):
844    def scale_voxels(self, scale=1):
845        """Scale the voxel content by factor `scale`."""
846        rsl = vtk.vtkImageReslice()
847        rsl.SetInputData(self.imagedata())
848        rsl.SetScalarScale(scale)
849        rsl.Update()
850        self._update(rsl.GetOutput())
851        self.pipeline = utils.OperationNode(
852            f"scale_voxels\nscale={scale}", parents=[self], c="#4cc9f0"
853        )
854        return self

Scale the voxel content by factor scale.
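A minimal sketch on a random synthetic volume, checking that the scalar range is rescaled by the given factor:

```python
import numpy as np
from vedo import Volume

vol = Volume(np.random.rand(20, 20, 20).astype(np.float32))
before = vol.imagedata().GetScalarRange()
vol.scale_voxels(0.5)                        # multiply every voxel value by 0.5
print(before, "->", vol.imagedata().GetScalarRange())
```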

class Volume(BaseVolume, vedo.base.BaseGrid, vtkmodules.vtkRenderingCore.vtkVolume):
 858class Volume(BaseVolume, BaseGrid, vtk.vtkVolume):
 859    """
 860    Class to describe datasets that are defined on "voxels":
 861    the 3D equivalent of 2D pixels.
 862    """
 863
 864    def __init__(
 865        self,
 866        inputobj=None,
 867        c="RdBu_r",
 868        alpha=(0.0, 0.0, 0.2, 0.4, 0.8, 1.0),
 869        alpha_gradient=None,
 870        alpha_unit=1,
 871        mode=0,
 872        spacing=None,
 873        dims=None,
 874        origin=None,
 875        mapper="smart",
 876    ):
 877        """
 878        This class can be initialized with a numpy object, a `vtkImageData`
 879        or a list of 2D bmp files.
 880
 881        Arguments:
 882            c : (list, str)
 883                sets colors along the scalar range, or a matplotlib color map name
 884            alpha : (float, list)
 885                sets transparencies along the scalar range
 886            alpha_unit : (float)
 887                low values make composite rendering look brighter and denser
 888            origin : (list)
 889                set volume origin coordinates
 890            spacing : (list)
 891                voxel dimensions in x, y and z.
 892            dims : (list)
 893                specify the dimensions of the volume.
 894            mapper : (str)
 895                either 'gpu', 'opengl_gpu', 'fixed' or 'smart'
 896            mode : (int)
 897                define the volumetric rendering style:
 898                    - 0, composite rendering
 899                    - 1, maximum projection
 900                    - 2, minimum projection
 901                    - 3, average projection
 902                    - 4, additive mode
 903
 904                <br>The default mode is "composite" where the scalar values are sampled through
 905                the volume and composited in a front-to-back scheme through alpha blending.
 906                The final color and opacity is determined using the color and opacity transfer
 907                functions specified in alpha keyword.
 908
 909                Maximum and minimum intensity blend modes use the maximum and minimum
 910                scalar values, respectively, along the sampling ray.
 911                The final color and opacity is determined by passing the resultant value
 912                through the color and opacity transfer functions.
 913
 914                Additive blend mode accumulates scalar values by passing each value
 915                through the opacity transfer function and then adding up the product
 916                of the value and its opacity. In other words, the scalar values are scaled
 917                using the opacity transfer function and summed to derive the final color.
 918                Note that the resulting image is always grayscale i.e. aggregated values
 919                are not passed through the color transfer function.
 920                This is because the final value is a derived value and not a real data value
 921                along the sampling ray.
 922
 923                Average intensity blend mode works similarly to the additive blend mode where
 924                the scalar values are multiplied by opacity calculated from the opacity
 925                transfer function and then added.
 926                The additional step here is to divide the sum by the number of samples
 927                taken through the volume.
 928                As is the case with the additive intensity projection, the final image will
 929                always be grayscale i.e. the aggregated values are not passed through the
 930                color transfer function.
 931
 932        Example:
 933            ```python
 934            from vedo import Volume
 935            vol = Volume("path/to/mydata/rec*.bmp")
 936            vol.show()
 937            ```
 938
 939        Examples:
 940            - [numpy2volume1.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/numpy2volume1.py)
 941
 942                ![](https://vedo.embl.es/images/volumetric/numpy2volume1.png)
 943
 944            - [read_volume2.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/read_volume2.py)
 945
 946                ![](https://vedo.embl.es/images/volumetric/read_volume2.png)
 947
 948        .. note::
 949            if a `list` of values is used for `alpha` this is interpreted
 950            as a transfer function along the range of the scalar.
 951        """
 952        vtk.vtkVolume.__init__(self)
 953        BaseGrid.__init__(self)
 954        BaseVolume.__init__(self)
 955        # super().__init__()
 956
 957        ###################
 958        if isinstance(inputobj, str):
 959
 960            if "https://" in inputobj:
 961                inputobj = vedo.file_io.download(inputobj, verbose=False)  # fpath
 962            elif os.path.isfile(inputobj):
 963                pass
 964            else:
 965                inputobj = sorted(glob.glob(inputobj))
 966
 967        ###################
 968        if "gpu" in mapper:
 969            self._mapper = vtk.vtkGPUVolumeRayCastMapper()
 970        elif "opengl_gpu" in mapper:
 971            self._mapper = vtk.vtkOpenGLGPUVolumeRayCastMapper()
 972        elif "smart" in mapper:
 973            self._mapper = vtk.vtkSmartVolumeMapper()
 974        elif "fixed" in mapper:
 975            self._mapper = vtk.vtkFixedPointVolumeRayCastMapper()
 976        elif isinstance(mapper, vtk.vtkMapper):
 977            self._mapper = mapper
 978        else:
 979            print("Error unknown mapper type", [mapper])
 980            raise RuntimeError()
 981        self.SetMapper(self._mapper)
 982
 983        ###################
 984        inputtype = str(type(inputobj))
 985
 986        # print('Volume inputtype', inputtype, c='b')
 987
 988        if inputobj is None:
 989            img = vtk.vtkImageData()
 990
 991        elif utils.is_sequence(inputobj):
 992
 993            if isinstance(inputobj[0], str) and ".bmp" in inputobj[0].lower():
 994                # scan sequence of BMP files
 995                ima = vtk.vtkImageAppend()
 996                ima.SetAppendAxis(2)
 997                pb = utils.ProgressBar(0, len(inputobj))
 998                for i in pb.range():
 999                    f = inputobj[i]
1000                    if "_rec_spr.bmp" in f:
1001                        continue
1002                    picr = vtk.vtkBMPReader()
1003                    picr.SetFileName(f)
1004                    picr.Update()
1005                    mgf = vtk.vtkImageMagnitude()
1006                    mgf.SetInputData(picr.GetOutput())
1007                    mgf.Update()
1008                    ima.AddInputData(mgf.GetOutput())
1009                    pb.print("loading...")
1010                ima.Update()
1011                img = ima.GetOutput()
1012
1013            else:
1014
1015                if len(inputobj.shape) == 1:
1016                    varr = utils.numpy2vtk(inputobj)
1017                else:
1018                    varr = utils.numpy2vtk(inputobj.ravel(order="F"))
1019                varr.SetName("input_scalars")
1020
1021                img = vtk.vtkImageData()
1022                if dims is not None:
1023                    img.SetDimensions(dims[2], dims[1], dims[0])
1024                else:
1025                    if len(inputobj.shape) == 1:
1026                        vedo.logger.error("must set dimensions (dims keyword) in Volume")
1027                        raise RuntimeError()
1028                    img.SetDimensions(inputobj.shape)
1029                img.GetPointData().AddArray(varr)
1030                img.GetPointData().SetActiveScalars(varr.GetName())
1031
1032        elif "ImageData" in inputtype:
1033            img = inputobj
1034
1035        elif isinstance(inputobj, Volume):
1036            img = inputobj.inputdata()
1037
1038        elif "UniformGrid" in inputtype:
1039            img = inputobj
1040
1041        elif hasattr(inputobj, "GetOutput"):  # passing a vtk object, try to extract the imagedata
1042            if hasattr(inputobj, "Update"):
1043                inputobj.Update()
1044            img = inputobj.GetOutput()
1045
1046        elif isinstance(inputobj, str):
1047            if "https://" in inputobj:
1048                inputobj = vedo.file_io.download(inputobj, verbose=False)
1049            img = vedo.file_io.loadImageData(inputobj)
1050
1051        else:
1052            vedo.logger.error(f"cannot understand input type {inputtype}")
1053            return
1054
1055        if dims is not None:
1056            img.SetDimensions(dims)
1057
1058        if origin is not None:
1059            img.SetOrigin(origin)  ### DIFFERENT from volume.origin()!
1060
1061        if spacing is not None:
1062            img.SetSpacing(spacing)
1063
1064        self._data = img
1065        self._mapper.SetInputData(img)
1066
1067        if img.GetPointData().GetScalars():
1068            if img.GetPointData().GetScalars().GetNumberOfComponents() == 1:
1069                self.mode(mode).color(c).alpha(alpha).alpha_gradient(alpha_gradient)
1070                self.GetProperty().SetShade(True)
1071                self.GetProperty().SetInterpolationType(1)
1072                self.GetProperty().SetScalarOpacityUnitDistance(alpha_unit)
1073
1074        # remember stuff:
1075        self._mode = mode
1076        self._color = c
1077        self._alpha = alpha
1078        self._alpha_grad = alpha_gradient
1079        self._alpha_unit = alpha_unit
1080
1081        self.pipeline = utils.OperationNode(
1082            "Volume", comment=f"dims={tuple(self.dimensions())}", c="#4cc9f0"
1083        )
1084        #######################################################################
1085
1086    def _update(self, data):
1087        self._data = data
1088        self._data.GetPointData().Modified()
1089        self._mapper.SetInputData(data)
1090        self._mapper.Modified()
1091        self._mapper.Update()
1092        return self
1093
1094    def mode(self, mode=None):
1095        """
1096        Define the volumetric rendering mode following this:
1097            - 0, composite rendering
1098            - 1, maximum projection rendering
1099            - 2, minimum projection rendering
1100            - 3, average projection rendering
1101            - 4, additive mode
1102
1103        The default mode is "composite" where the scalar values are sampled through
1104        the volume and composited in a front-to-back scheme through alpha blending.
1105        The final color and opacity is determined using the color and opacity transfer
1106        functions specified in alpha keyword.
1107
1108        Maximum and minimum intensity blend modes use the maximum and minimum
1109        scalar values, respectively, along the sampling ray.
1110        The final color and opacity is determined by passing the resultant value
1111        through the color and opacity transfer functions.
1112
1113        Additive blend mode accumulates scalar values by passing each value
1114        through the opacity transfer function and then adding up the product
1115        of the value and its opacity. In other words, the scalar values are scaled
1116        using the opacity transfer function and summed to derive the final color.
1117        Note that the resulting image is always grayscale i.e. aggregated values
1118        are not passed through the color transfer function.
1119        This is because the final value is a derived value and not a real data value
1120        along the sampling ray.
1121
1122        Average intensity blend mode works similar to the additive blend mode where
1123        the scalar values are multiplied by opacity calculated from the opacity
1124        transfer function and then added.
1125        The additional step here is to divide the sum by the number of samples
1126        taken through the volume.
1127        As is the case with the additive intensity projection, the final image will
1128        always be grayscale i.e. the aggregated values are not passed through the
1129        color transfer function.
1130        """
1131        if mode is None:
1132            return self._mapper.GetBlendMode()
1133
1134        if isinstance(mode, str):
1135            if "comp" in mode:
1136                mode = 0
1137            elif "proj" in mode:
1138                if "max" in mode:
1139                    mode = 1
1140                elif "min" in mode:
1141                    mode = 2
1142                elif "ave" in mode:
1143                    mode = 3
1144                else:
1145                    vedo.logger.warning(f"unknown mode {mode}")
1146                    mode = 0
1147            elif "add" in mode:
1148                mode = 4
1149            else:
1150                vedo.logger.warning(f"unknown mode {mode}")
1151                mode = 0
1152
1153        self._mapper.SetBlendMode(mode)
1154        self._mode = mode
1155        return self
1156
1157    def shade(self, status=None):
1158        """
1159        Set/Get the shading of a Volume.
1160        Shading can be further controlled with `volume.lighting()` method.
1161
1162        If shading is turned on, the mapper may perform shading calculations.
1163        In some cases shading does not apply
1164        (for example, in maximum intensity projection mode).
1165        """
1166        if status is None:
1167            return self.GetProperty().GetShade()
1168        self.GetProperty().SetShade(status)
1169        return self
1170
1171    def cmap(self, c, alpha=None, vmin=None, vmax=None):
1172        """Same as `color()`.
1173
1174        Arguments:
1175            alpha : (list)
1176                use a list to specify transparencies along the scalar range
1177            vmin : (float)
1178                force the min of the scalar range to be this value
1179            vmax : (float)
1180                force the max of the scalar range to be this value
1181        """
1182        return self.color(c, alpha, vmin, vmax)
1183
1184    def jittering(self, status=None):
1185        """
1186        If `True`, each ray traversal direction will be perturbed slightly
1187        using a noise-texture to get rid of wood-grain effects.
1188        """
1189        if hasattr(self._mapper, "SetUseJittering"):  # tetmesh doesn't have it
1190            if status is None:
1191                return self._mapper.GetUseJittering()
1192            self._mapper.SetUseJittering(status)
1193        return self
1194
1195    def mask(self, data):
1196        """
1197        Mask a volume visualization with a binary mask.
1198        The volume must be created with the keyword mapper='gpu'.
1199
1200        Example:
1201        ```python
1202            from vedo import np, Volume, show
1203            data_matrix = np.zeros([75, 75, 75], dtype=np.uint8)
1204            # all voxels have value zero except:
1205            data_matrix[0:35,   0:35,  0:35] = 1
1206            data_matrix[35:55, 35:55, 35:55] = 2
1207            data_matrix[55:74, 55:74, 55:74] = 3
1208            vol = Volume(data_matrix, c=['white','b','g','r'], mapper='gpu')
1209            data_mask = np.zeros_like(data_matrix)
1210            data_mask[10:65, 10:45, 20:75] = 1
1211            vol.mask(data_mask)
1212            show(vol, axes=1).close()
1213        ```
1214        See also:
1215            `volume.hide_voxels()`
1216        """
1217        mask = Volume(data.astype(np.uint8))
1218        try:
1219            self.mapper().SetMaskTypeToBinary()
1220            self.mapper().SetMaskInput(mask.inputdata())
1221        except AttributeError:
1222            vedo.logger.error("volume.mask() must create the volume with Volume(..., mapper='gpu')")
1223        return self
1224
1225    def hide_voxels(self, ids):
1226        """
1227        Hide voxels (cells) from visualization.
1228
1229        Example:
1230            ```python
1231            from vedo import *
1232            embryo = Volume(dataurl+'embryo.tif')
1233            embryo.hide_voxels(list(range(10000)))
1234            show(embryo, axes=1).close()
1235            ```
1236
1237        See also:
1238            `volume.mask()`
1239        """
1240        ghost_mask = np.zeros(self.ncells, dtype=np.uint8)
1241        ghost_mask[ids] = vtk.vtkDataSetAttributes.HIDDENCELL
1242        name = vtk.vtkDataSetAttributes.GhostArrayName()
1243        garr = utils.numpy2vtk(ghost_mask, name=name, dtype=np.uint8)
1244        self._data.GetCellData().AddArray(garr)
1245        self._data.GetCellData().Modified()
1246        return self
1247
1248    def alpha_gradient(self, alpha_grad, vmin=None, vmax=None):
1249        """
1250        Assign a set of transparencies to a volume's gradient
1251        along the range of the scalar value.
1252        A single constant value can also be assigned.
1253        The gradient function is used to decrease the opacity
1254        in the "flat" regions of the volume while maintaining the opacity
1255        at the boundaries between material types.  The gradient is measured
1256        as the amount by which the intensity changes over unit distance.
1257
1258        The format for alpha_grad is the same as for method `volume.alpha()`.
1259        """
1260        if vmin is None:
1261            vmin, _ = self._data.GetScalarRange()
1262        if vmax is None:
1263            _, vmax = self._data.GetScalarRange()
1264        self._alpha_grad = alpha_grad
1265        volumeProperty = self.GetProperty()
1266
1267        if alpha_grad is None:
1268            volumeProperty.DisableGradientOpacityOn()
1269            return self
1270
1271        volumeProperty.DisableGradientOpacityOff()
1272
1273        gotf = volumeProperty.GetGradientOpacity()
1274        if utils.is_sequence(alpha_grad):
1275            alpha_grad = np.array(alpha_grad)
1276            if len(alpha_grad.shape) == 1:  # user passing a flat list e.g. (0.0, 0.3, 0.9, 1)
1277                for i, al in enumerate(alpha_grad):
1278                    xalpha = vmin + (vmax - vmin) * i / (len(alpha_grad) - 1)
1279                    # Create transfer mapping scalar value to gradient opacity
1280                    gotf.AddPoint(xalpha, al)
1281            elif len(alpha_grad.shape) == 2:  # user passing [(x0,alpha0), ...]
1282                gotf.AddPoint(vmin, alpha_grad[0][1])
1283                for xalpha, al in alpha_grad:
1284                    # Create transfer mapping scalar value to opacity
1285                    gotf.AddPoint(xalpha, al)
1286                gotf.AddPoint(vmax, alpha_grad[-1][1])
1287            # print("alpha_grad at", round(xalpha, 1), "\tset to", al)
1288        else:
1289            gotf.AddPoint(vmin, alpha_grad)  # constant alpha_grad
1290            gotf.AddPoint(vmax, alpha_grad)
1291        return self
1292
1293    def component_weight(self, i, weight):
1294        """Set the scalar component weight in range [0,1]."""
1295        self.GetProperty().SetComponentWeight(i, weight)
1296        return self
1297
1298    def xslice(self, i):
1299        """Extract the slice at index `i` of volume along x-axis."""
1300        vslice = vtk.vtkImageDataGeometryFilter()
1301        vslice.SetInputData(self.imagedata())
1302        nx, ny, nz = self.imagedata().GetDimensions()
1303        if i > nx - 1:
1304            i = nx - 1
1305        vslice.SetExtent(i, i, 0, ny, 0, nz)
1306        vslice.Update()
1307        m = Mesh(vslice.GetOutput())
1308        m.pipeline = utils.OperationNode(f"xslice {i}", parents=[self], c="#4cc9f0:#e9c46a")
1309        return m
1310
1311    def yslice(self, j):
1312        """Extract the slice at index `j` of volume along y-axis."""
1313        vslice = vtk.vtkImageDataGeometryFilter()
1314        vslice.SetInputData(self.imagedata())
1315        nx, ny, nz = self.imagedata().GetDimensions()
1316        if j > ny - 1:
1317            j = ny - 1
1318        vslice.SetExtent(0, nx, j, j, 0, nz)
1319        vslice.Update()
1320        m = Mesh(vslice.GetOutput())
1321        m.pipeline = utils.OperationNode(f"yslice {j}", parents=[self], c="#4cc9f0:#e9c46a")
1322        return m
1323
1324    def zslice(self, k):
1325        """Extract the slice at index `k` of volume along z-axis."""
1326        vslice = vtk.vtkImageDataGeometryFilter()
1327        vslice.SetInputData(self.imagedata())
1328        nx, ny, nz = self.imagedata().GetDimensions()
1329        if k > nz - 1:
1330            k = nz - 1
1331        vslice.SetExtent(0, nx, 0, ny, k, k)
1332        vslice.Update()
1333        m = Mesh(vslice.GetOutput())
1334        m.pipeline = utils.OperationNode(f"zslice {k}", parents=[self], c="#4cc9f0:#e9c46a")
1335        return m
1336
1337    def slice_plane(self, origin=(0, 0, 0), normal=(1, 1, 1), autocrop=False):
1338        """
1339        Extract the slice along a given plane position and normal.
1340
1341        Example:
1342            - [slice_plane1.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/slice_plane1.py)
1343
1344                ![](https://vedo.embl.es/images/volumetric/slicePlane1.gif)
1345        """
1346        reslice = vtk.vtkImageReslice()
1347        reslice.SetInputData(self._data)
1348        reslice.SetOutputDimensionality(2)
1349        newaxis = utils.versor(normal)
1350        pos = np.array(origin)
1351        initaxis = (0, 0, 1)
1352        crossvec = np.cross(initaxis, newaxis)
1353        angle = np.arccos(np.dot(initaxis, newaxis))
1354        T = vtk.vtkTransform()
1355        T.PostMultiply()
1356        T.RotateWXYZ(np.rad2deg(angle), crossvec)
1357        T.Translate(pos)
1358        M = T.GetMatrix()
1359        reslice.SetResliceAxes(M)
1360        reslice.SetInterpolationModeToLinear()
1361        reslice.SetAutoCropOutput(not autocrop)
1362        reslice.Update()
1363        vslice = vtk.vtkImageDataGeometryFilter()
1364        vslice.SetInputData(reslice.GetOutput())
1365        vslice.Update()
1366        msh = Mesh(vslice.GetOutput())
1367        msh.SetOrientation(T.GetOrientation())
1368        msh.SetPosition(pos)
1369        msh.pipeline = utils.OperationNode("slice_plane", parents=[self], c="#4cc9f0:#e9c46a")
1370        return msh
1371
1372    def warp(self, source, target, sigma=1, mode="3d", fit=False):
1373        """
1374        Warp volume scalars within a Volume by specifying
1375        source and target sets of points.
1376
1377        Arguments:
1378            source : (Points, list)
1379                the list of source points
1380            target : (Points, list)
1381                the list of target points
1382            fit : (bool)
1383                fit/adapt the old bounding box to the warped geometry
1384        """
1385        if isinstance(source, vedo.Points):
1386            source = source.points()
1387        if isinstance(target, vedo.Points):
1388            target = target.points()
1389
1390        ns = len(source)
1391        ptsou = vtk.vtkPoints()
1392        ptsou.SetNumberOfPoints(ns)
1393        for i in range(ns):
1394            ptsou.SetPoint(i, source[i])
1395
1396        nt = len(target)
1397        if ns != nt:
1398            vedo.logger.error(f"#source {ns} != {nt} #target points")
1399            raise RuntimeError()
1400
1401        pttar = vtk.vtkPoints()
1402        pttar.SetNumberOfPoints(nt)
1403        for i in range(ns):
1404            pttar.SetPoint(i, target[i])
1405
1406        T = vtk.vtkThinPlateSplineTransform()
1407        if mode.lower() == "3d":
1408            T.SetBasisToR()
1409        elif mode.lower() == "2d":
1410            T.SetBasisToR2LogR()
1411        else:
1412            vedo.logger.error(f"unknown mode {mode}")
1413            raise RuntimeError()
1414
1415        T.SetSigma(sigma)
1416        T.SetSourceLandmarks(ptsou)
1417        T.SetTargetLandmarks(pttar)
1418        T.Inverse()
1419        self.transform = T
1420        self.apply_transform(T, fit=fit)
1421        self.pipeline = utils.OperationNode("warp", parents=[self], c="#4cc9f0")
1422        return self
1423
1424    def apply_transform(self, T, fit=False):
1425        """
1426        Apply a transform to the scalars in the volume.
1427
1428        Arguments:
1429            T : (vtkTransform, matrix)
1430                The transformation to be applied
1431            fit : (bool)
1432                fit/adapt the old bounding box to the warped geometry
1433        """
1434        if isinstance(T, vtk.vtkMatrix4x4):
1435            tr = vtk.vtkTransform()
1436            tr.SetMatrix(T)
1437            T = tr
1438
1439        elif utils.is_sequence(T):
1440            M = vtk.vtkMatrix4x4()
1441            n = len(T[0])
1442            for i in range(n):
1443                for j in range(n):
1444                    M.SetElement(i, j, T[i][j])
1445            tr = vtk.vtkTransform()
1446            tr.SetMatrix(M)
1447            T = tr
1448
1449        reslice = vtk.vtkImageReslice()
1450        reslice.SetInputData(self._data)
1451        reslice.SetResliceTransform(T)
1452        reslice.SetOutputDimensionality(3)
1453        reslice.SetInterpolationModeToLinear()
1454
1455        spacing = self._data.GetSpacing()
1456        origin = self._data.GetOrigin()
1457
1458        if fit:
1459            bb = self.box()
1460            if isinstance(T, vtk.vtkThinPlateSplineTransform):
1461                TI = vtk.vtkThinPlateSplineTransform()
1462                TI.DeepCopy(T)
1463                TI.Inverse()
1464            else:
1465                TI = vtk.vtkTransform()
1466                TI.DeepCopy(T)
1467            bb.apply_transform(TI)
1468            bounds = bb.GetBounds()
1469            bounds = (
1470                bounds[0] / spacing[0],
1471                bounds[1] / spacing[0],
1472                bounds[2] / spacing[1],
1473                bounds[3] / spacing[1],
1474                bounds[4] / spacing[2],
1475                bounds[5] / spacing[2],
1476            )
1477            bounds = np.round(bounds).astype(int)
1478            reslice.SetOutputExtent(bounds)
1479            reslice.SetOutputSpacing(spacing[0], spacing[1], spacing[2])
1480            reslice.SetOutputOrigin(origin[0], origin[1], origin[2])
1481
1482        reslice.Update()
1483        self._update(reslice.GetOutput())
1484        self.pipeline = utils.OperationNode("apply_transform", parents=[self], c="#4cc9f0")
1485        return self

Class to describe datasets that are defined on "voxels": the 3D equivalent of 2D pixels.

Volume( inputobj=None, c='RdBu_r', alpha=(0.0, 0.0, 0.2, 0.4, 0.8, 1.0), alpha_gradient=None, alpha_unit=1, mode=0, spacing=None, dims=None, origin=None, mapper='smart')
 864    def __init__(
 865        self,
 866        inputobj=None,
 867        c="RdBu_r",
 868        alpha=(0.0, 0.0, 0.2, 0.4, 0.8, 1.0),
 869        alpha_gradient=None,
 870        alpha_unit=1,
 871        mode=0,
 872        spacing=None,
 873        dims=None,
 874        origin=None,
 875        mapper="smart",
 876    ):
 877        """
 878        This class can be initialized with a numpy object, a `vtkImageData`
 879        or a list of 2D bmp files.
 880
 881        Arguments:
 882            c : (list, str)
 883                sets colors along the scalar range, or a matplotlib color map name
 884            alphas : (float, list)
 885                sets transparencies along the scalar range
 886            alpha_unit : (float)
 887                low values make composite rendering look brighter and denser
 888            origin : (list)
 889                set volume origin coordinates
 890            spacing : (list)
 891                voxel dimensions in x, y and z.
 892            dims : (list)
 893                specify the dimensions of the volume.
 894            mapper : (str)
 895                either 'gpu', 'opengl_gpu', 'fixed' or 'smart'
 896            mode : (int)
 897                define the volumetric rendering style:
 898                    - 0, composite rendering
 899                    - 1, maximum projection
 900                    - 2, minimum projection
 901                    - 3, average projection
 902                    - 4, additive mode
 903
 904                <br>The default mode is "composite" where the scalar values are sampled through
 905                the volume and composited in a front-to-back scheme through alpha blending.
 906                The final color and opacity is determined using the color and opacity transfer
 907                functions specified in alpha keyword.
 908
 909                Maximum and minimum intensity blend modes use the maximum and minimum
 910                scalar values, respectively, along the sampling ray.
 911                The final color and opacity is determined by passing the resultant value
 912                through the color and opacity transfer functions.
 913
 914                Additive blend mode accumulates scalar values by passing each value
 915                through the opacity transfer function and then adding up the product
 916                of the value and its opacity. In other words, the scalar values are scaled
 917                using the opacity transfer function and summed to derive the final color.
 918                Note that the resulting image is always grayscale i.e. aggregated values
 919                are not passed through the color transfer function.
 920                This is because the final value is a derived value and not a real data value
 921                along the sampling ray.
 922
 923                Average intensity blend mode works similar to the additive blend mode where
 924                the scalar values are multiplied by opacity calculated from the opacity
 925                transfer function and then added.
 926                The additional step here is to divide the sum by the number of samples
 927                taken through the volume.
 928                As is the case with the additive intensity projection, the final image will
 929                always be grayscale i.e. the aggregated values are not passed through the
 930                color transfer function.
 931
 932        Example:
 933            ```python
 934            from vedo import Volume
 935            vol = Volume("path/to/mydata/rec*.bmp")
 936            vol.show()
 937            ```
 938
 939        Examples:
 940            - [numpy2volume1.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/numpy2volume1.py)
 941
 942                ![](https://vedo.embl.es/images/volumetric/numpy2volume1.png)
 943
 944            - [read_volume2.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/read_volume2.py)
 945
 946                ![](https://vedo.embl.es/images/volumetric/read_volume2.png)
 947
 948        .. note::
 949            if a `list` of values is used for `alphas` this is interpreted
 950            as a transfer function along the range of the scalar.
 951        """
 952        vtk.vtkVolume.__init__(self)
 953        BaseGrid.__init__(self)
 954        BaseVolume.__init__(self)
 955        # super().__init__()
 956
 957        ###################
 958        if isinstance(inputobj, str):
 959
 960            if "https://" in inputobj:
 961                inputobj = vedo.file_io.download(inputobj, verbose=False)  # fpath
 962            elif os.path.isfile(inputobj):
 963                pass
 964            else:
 965                inputobj = sorted(glob.glob(inputobj))
 966
 967        ###################
 968        if "gpu" in mapper:
 969            self._mapper = vtk.vtkGPUVolumeRayCastMapper()
 970        elif "opengl_gpu" in mapper:
 971            self._mapper = vtk.vtkOpenGLGPUVolumeRayCastMapper()
 972        elif "smart" in mapper:
 973            self._mapper = vtk.vtkSmartVolumeMapper()
 974        elif "fixed" in mapper:
 975            self._mapper = vtk.vtkFixedPointVolumeRayCastMapper()
 976        elif isinstance(mapper, vtk.vtkMapper):
 977            self._mapper = mapper
 978        else:
 979            print("Error unknown mapper type", [mapper])
 980            raise RuntimeError()
 981        self.SetMapper(self._mapper)
 982
 983        ###################
 984        inputtype = str(type(inputobj))
 985
 986        # print('Volume inputtype', inputtype, c='b')
 987
 988        if inputobj is None:
 989            img = vtk.vtkImageData()
 990
 991        elif utils.is_sequence(inputobj):
 992
 993            if isinstance(inputobj[0], str) and ".bmp" in inputobj[0].lower():
 994                # scan sequence of BMP files
 995                ima = vtk.vtkImageAppend()
 996                ima.SetAppendAxis(2)
 997                pb = utils.ProgressBar(0, len(inputobj))
 998                for i in pb.range():
 999                    f = inputobj[i]
1000                    if "_rec_spr.bmp" in f:
1001                        continue
1002                    picr = vtk.vtkBMPReader()
1003                    picr.SetFileName(f)
1004                    picr.Update()
1005                    mgf = vtk.vtkImageMagnitude()
1006                    mgf.SetInputData(picr.GetOutput())
1007                    mgf.Update()
1008                    ima.AddInputData(mgf.GetOutput())
1009                    pb.print("loading...")
1010                ima.Update()
1011                img = ima.GetOutput()
1012
1013            else:
1014
1015                if len(inputobj.shape) == 1:
1016                    varr = utils.numpy2vtk(inputobj)
1017                else:
1018                    varr = utils.numpy2vtk(inputobj.ravel(order="F"))
1019                varr.SetName("input_scalars")
1020
1021                img = vtk.vtkImageData()
1022                if dims is not None:
1023                    img.SetDimensions(dims[2], dims[1], dims[0])
1024                else:
1025                    if len(inputobj.shape) == 1:
1026                        vedo.logger.error("must set dimensions (dims keyword) in Volume")
1027                        raise RuntimeError()
1028                    img.SetDimensions(inputobj.shape)
1029                img.GetPointData().AddArray(varr)
1030                img.GetPointData().SetActiveScalars(varr.GetName())
1031
1032        elif "ImageData" in inputtype:
1033            img = inputobj
1034
1035        elif isinstance(inputobj, Volume):
1036            img = inputobj.inputdata()
1037
1038        elif "UniformGrid" in inputtype:
1039            img = inputobj
1040
1041        elif hasattr(inputobj, "GetOutput"):  # passing a vtk object, try to extract imagedata
1042            if hasattr(inputobj, "Update"):
1043                inputobj.Update()
1044            img = inputobj.GetOutput()
1045
1046        elif isinstance(inputobj, str):
1047            if "https://" in inputobj:
1048                inputobj = vedo.file_io.download(inputobj, verbose=False)
1049            img = vedo.file_io.loadImageData(inputobj)
1050
1051        else:
1052            vedo.logger.error(f"cannot understand input type {inputtype}")
1053            return
1054
1055        if dims is not None:
1056            img.SetDimensions(dims)
1057
1058        if origin is not None:
1059            img.SetOrigin(origin)  ### DIFFERENT from volume.origin()!
1060
1061        if spacing is not None:
1062            img.SetSpacing(spacing)
1063
1064        self._data = img
1065        self._mapper.SetInputData(img)
1066
1067        if img.GetPointData().GetScalars():
1068            if img.GetPointData().GetScalars().GetNumberOfComponents() == 1:
1069                self.mode(mode).color(c).alpha(alpha).alpha_gradient(alpha_gradient)
1070                self.GetProperty().SetShade(True)
1071                self.GetProperty().SetInterpolationType(1)
1072                self.GetProperty().SetScalarOpacityUnitDistance(alpha_unit)
1073
1074        # remember stuff:
1075        self._mode = mode
1076        self._color = c
1077        self._alpha = alpha
1078        self._alpha_grad = alpha_gradient
1079        self._alpha_unit = alpha_unit
1080
1081        self.pipeline = utils.OperationNode(
1082            "Volume", comment=f"dims={tuple(self.dimensions())}", c="#4cc9f0"
1083        )
1084        #######################################################################

This class can be initialized with a numpy object, a vtkImageData or a list of 2D bmp files.

Arguments:
  • c : (list, str) sets colors along the scalar range, or a matplotlib color map name
  • alphas : (float, list) sets transparencies along the scalar range
  • alpha_unit : (float) low values make composite rendering look brighter and denser
  • origin : (list) set volume origin coordinates
  • spacing : (list) voxel dimensions in x, y and z.
  • dims : (list) specify the dimensions of the volume.
  • mapper : (str) either 'gpu', 'opengl_gpu', 'fixed' or 'smart'
  • mode : (int) define the volumetric rendering style:

    • 0, composite rendering
    • 1, maximum projection
    • 2, minimum projection
    • 3, average projection
    • 4, additive mode


    The default mode is "composite" where the scalar values are sampled through the volume and composited in a front-to-back scheme through alpha blending. The final color and opacity is determined using the color and opacity transfer functions specified in alpha keyword.

    Maximum and minimum intensity blend modes use the maximum and minimum scalar values, respectively, along the sampling ray. The final color and opacity is determined by passing the resultant value through the color and opacity transfer functions.

    Additive blend mode accumulates scalar values by passing each value through the opacity transfer function and then adding up the product of the value and its opacity. In other words, the scalar values are scaled using the opacity transfer function and summed to derive the final color. Note that the resulting image is always grayscale i.e. aggregated values are not passed through the color transfer function. This is because the final value is a derived value and not a real data value along the sampling ray.

    Average intensity blend mode works similar to the additive blend mode where the scalar values are multiplied by opacity calculated from the opacity transfer function and then added. The additional step here is to divide the sum by the number of samples taken through the volume. As is the case with the additive intensity projection, the final image will always be grayscale i.e. the aggregated values are not passed through the color transfer function.

Example:

    from vedo import Volume
    vol = Volume("path/to/mydata/rec*.bmp")
    vol.show()

Examples:
  • read_volume2.py (https://github.com/marcomusy/vedo/tree/master/examples/volumetric/read_volume2.py)

Note: if a list of values is used for alphas this is interpreted as a transfer function along the range of the scalar.
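
For reference, a minimal sketch building a Volume directly from a numpy array and assigning the transfer functions afterwards (the scalar field and all values below are illustrative):

    import numpy as np
    from vedo import Volume, show

    # a hypothetical 3D scalar field on a 60x60x60 grid
    X, Y, Z = np.mgrid[0:60, 0:60, 0:60]
    scalars = np.sqrt((X - 30)**2 + (Y - 30)**2 + (Z - 30)**2)

    vol = Volume(scalars, spacing=(1, 1, 1))            # voxel size in x, y and z
    vol.cmap("jet").alpha([0.0, 0.0, 0.2, 0.6, 1.0])    # color and opacity transfer functions
    show(vol, axes=1).close()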

def mode(self, mode=None):
1094    def mode(self, mode=None):
1095        """
1096        Define the volumetric rendering mode as one of the following:
1097            - 0, composite rendering
1098            - 1, maximum projection rendering
1099            - 2, minimum projection rendering
1100            - 3, average projection rendering
1101            - 4, additive mode
1102
1103        The default mode is "composite" where the scalar values are sampled through
1104        the volume and composited in a front-to-back scheme through alpha blending.
1105        The final color and opacity is determined using the color and opacity transfer
1106        functions specified in alpha keyword.
1107
1108        Maximum and minimum intensity blend modes use the maximum and minimum
1109        scalar values, respectively, along the sampling ray.
1110        The final color and opacity is determined by passing the resultant value
1111        through the color and opacity transfer functions.
1112
1113        Additive blend mode accumulates scalar values by passing each value
1114        through the opacity transfer function and then adding up the product
1115        of the value and its opacity. In other words, the scalar values are scaled
1116        using the opacity transfer function and summed to derive the final color.
1117        Note that the resulting image is always grayscale i.e. aggregated values
1118        are not passed through the color transfer function.
1119        This is because the final value is a derived value and not a real data value
1120        along the sampling ray.
1121
1122        Average intensity blend mode works similar to the additive blend mode where
1123        the scalar values are multiplied by opacity calculated from the opacity
1124        transfer function and then added.
1125        The additional step here is to divide the sum by the number of samples
1126        taken through the volume.
1127        As is the case with the additive intensity projection, the final image will
1128        always be grayscale i.e. the aggregated values are not passed through the
1129        color transfer function.
1130        """
1131        if mode is None:
1132            return self._mapper.GetBlendMode()
1133
1134        if isinstance(mode, str):
1135            if "comp" in mode:
1136                mode = 0
1137            elif "proj" in mode:
1138                if "max" in mode:
1139                    mode = 1
1140                elif "min" in mode:
1141                    mode = 2
1142                elif "ave" in mode:
1143                    mode = 3
1144                else:
1145                    vedo.logger.warning(f"unknown mode {mode}")
1146                    mode = 0
1147            elif "add" in mode:
1148                mode = 4
1149            else:
1150                vedo.logger.warning(f"unknown mode {mode}")
1151                mode = 0
1152
1153        self._mapper.SetBlendMode(mode)
1154        self._mode = mode
1155        return self
Define the volumetric rendering mode as one of the following:
  • 0, composite rendering
  • 1, maximum projection rendering
  • 2, minimum projection rendering
  • 3, average projection rendering
  • 4, additive mode

The default mode is "composite" where the scalar values are sampled through the volume and composited in a front-to-back scheme through alpha blending. The final color and opacity is determined using the color and opacity transfer functions specified in alpha keyword.

Maximum and minimum intensity blend modes use the maximum and minimum scalar values, respectively, along the sampling ray. The final color and opacity is determined by passing the resultant value through the color and opacity transfer functions.

Additive blend mode accumulates scalar values by passing each value through the opacity transfer function and then adding up the product of the value and its opacity. In other words, the scalar values are scaled using the opacity transfer function and summed to derive the final color. Note that the resulting image is always grayscale i.e. aggregated values are not passed through the color transfer function. This is because the final value is a derived value and not a real data value along the sampling ray.

Average intensity blend mode works similar to the additive blend mode where the scalar values are multiplied by opacity calculated from the opacity transfer function and then added. The additional step here is to divide the sum by the number of samples taken through the volume. As is the case with the additive intensity projection, the final image will always be grayscale i.e. the aggregated values are not passed through the color transfer function.
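
As a quick usage sketch (using the embryo.tif dataset referenced elsewhere on this page), the mode can be set either by integer or by a descriptive string:

    from vedo import Volume, dataurl, show

    vol = Volume(dataurl + "embryo.tif")
    vol.mode("max projection")   # equivalent to vol.mode(1)
    show(vol, axes=1).close()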

def shade(self, status=None):
1157    def shade(self, status=None):
1158        """
1159        Set/Get the shading of a Volume.
1160        Shading can be further controlled with `volume.lighting()` method.
1161
1162        If shading is turned on, the mapper may perform shading calculations.
1163        In some cases shading does not apply
1164        (for example, in maximum intensity projection mode).
1165        """
1166        if status is None:
1167            return self.GetProperty().GetShade()
1168        self.GetProperty().SetShade(status)
1169        return self

Set/Get the shading of a Volume. Shading can be further controlled with volume.lighting() method.

If shading is turned on, the mapper may perform shading calculations. In some cases shading does not apply (for example, in maximum intensity projection mode).

def cmap(self, c, alpha=None, vmin=None, vmax=None):
1171    def cmap(self, c, alpha=None, vmin=None, vmax=None):
1172        """Same as `color()`.
1173
1174        Arguments:
1175            alpha : (list)
1176                use a list to specify transparencies along the scalar range
1177            vmin : (float)
1178                force the min of the scalar range to be this value
1179            vmax : (float)
1180                force the max of the scalar range to be this value
1181        """
1182        return self.color(c, alpha, vmin, vmax)

Same as color().

Arguments:
  • alpha : (list) use a list to specify transparencies along the scalar range
  • vmin : (float) force the min of the scalar range to be this value
  • vmax : (float) force the max of the scalar range to be this value
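
A small usage sketch (the colormap name and the vmin/vmax values below are arbitrary and assume 8-bit scalars):

    from vedo import Volume, dataurl, show

    vol = Volume(dataurl + "embryo.tif")
    # clip the mapped scalar range and ramp the transparency along it
    vol.cmap("bone_r", alpha=[0.0, 0.0, 0.3, 0.8, 1.0], vmin=0, vmax=255)
    show(vol, axes=1).close()
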
def jittering(self, status=None):
1184    def jittering(self, status=None):
1185        """
1186        If `True`, each ray traversal direction will be perturbed slightly
1187        using a noise-texture to get rid of wood-grain effects.
1188        """
1189        if hasattr(self._mapper, "SetUseJittering"):  # tetmesh doesn't have it
1190            if status is None:
1191                return self._mapper.GetUseJittering()
1192            self._mapper.SetUseJittering(status)
1193        return self

If True, each ray traversal direction will be perturbed slightly using a noise-texture to get rid of wood-grain effects.

def mask(self, data):
1195    def mask(self, data):
1196        """
1197        Mask a volume visualization with a binary value.
1198        The volume must be created with the keyword mapper='gpu'.
1199
1200        Example:
1201        ```python
1202            from vedo import np, Volume, show
1203            data_matrix = np.zeros([75, 75, 75], dtype=np.uint8)
1204            # all voxels have value zero except:
1205            data_matrix[0:35,   0:35,  0:35] = 1
1206            data_matrix[35:55, 35:55, 35:55] = 2
1207            data_matrix[55:74, 55:74, 55:74] = 3
1208            vol = Volume(data_matrix, c=['white','b','g','r'], mapper='gpu')
1209            data_mask = np.zeros_like(data_matrix)
1210            data_mask[10:65, 10:45, 20:75] = 1
1211            vol.mask(data_mask)
1212            show(vol, axes=1).close()
1213        ```
1214        See also:
1215            `volume.hide_voxels()`
1216        """
1217        mask = Volume(data.astype(np.uint8))
1218        try:
1219            self.mapper().SetMaskTypeToBinary()
1220            self.mapper().SetMaskInput(mask.inputdata())
1221        except AttributeError:
1222            vedo.logger.error("volume.mask() must create the volume with Volume(..., mapper='gpu')")
1223        return self

Mask a volume visualization with a binary value. The volume must be created with the keyword mapper='gpu'.

Example:

    from vedo import np, Volume, show
    data_matrix = np.zeros([75, 75, 75], dtype=np.uint8)
    # all voxels have value zero except:
    data_matrix[0:35,   0:35,  0:35] = 1
    data_matrix[35:55, 35:55, 35:55] = 2
    data_matrix[55:74, 55:74, 55:74] = 3
    vol = Volume(data_matrix, c=['white','b','g','r'], mapper='gpu')
    data_mask = np.zeros_like(data_matrix)
    data_mask[10:65, 10:45, 20:75] = 1
    vol.mask(data_mask)
    show(vol, axes=1).close()
See also:

volume.hide_voxels()

def hide_voxels(self, ids):
1225    def hide_voxels(self, ids):
1226        """
1227        Hide voxels (cells) from visualization.
1228
1229        Example:
1230            ```python
1231            from vedo import *
1232            embryo = Volume(dataurl+'embryo.tif')
1233            embryo.hide_voxels(list(range(10000)))
1234            show(embryo, axes=1).close()
1235            ```
1236
1237        See also:
1238            `volume.mask()`
1239        """
1240        ghost_mask = np.zeros(self.ncells, dtype=np.uint8)
1241        ghost_mask[ids] = vtk.vtkDataSetAttributes.HIDDENCELL
1242        name = vtk.vtkDataSetAttributes.GhostArrayName()
1243        garr = utils.numpy2vtk(ghost_mask, name=name, dtype=np.uint8)
1244        self._data.GetCellData().AddArray(garr)
1245        self._data.GetCellData().Modified()
1246        return self

Hide voxels (cells) from visualization.

Example:

    from vedo import *
    embryo = Volume(dataurl+'embryo.tif')
    embryo.hide_voxels(list(range(10000)))
    show(embryo, axes=1).close()
See also:

volume.mask()

def alpha_gradient(self, alpha_grad, vmin=None, vmax=None):
1248    def alpha_gradient(self, alpha_grad, vmin=None, vmax=None):
1249        """
1250        Assign a set of transparencies to a volume's gradient
1251        along the range of the scalar value.
1252        A single constant value can also be assigned.
1253        The gradient function is used to decrease the opacity
1254        in the "flat" regions of the volume while maintaining the opacity
1255        at the boundaries between material types.  The gradient is measured
1256        as the amount by which the intensity changes over unit distance.
1257
1258        The format for alpha_grad is the same as for method `volume.alpha()`.
1259        """
1260        if vmin is None:
1261            vmin, _ = self._data.GetScalarRange()
1262        if vmax is None:
1263            _, vmax = self._data.GetScalarRange()
1264        self._alpha_grad = alpha_grad
1265        volumeProperty = self.GetProperty()
1266
1267        if alpha_grad is None:
1268            volumeProperty.DisableGradientOpacityOn()
1269            return self
1270
1271        volumeProperty.DisableGradientOpacityOff()
1272
1273        gotf = volumeProperty.GetGradientOpacity()
1274        if utils.is_sequence(alpha_grad):
1275            alpha_grad = np.array(alpha_grad)
1276            if len(alpha_grad.shape) == 1:  # user passing a flat list e.g. (0.0, 0.3, 0.9, 1)
1277                for i, al in enumerate(alpha_grad):
1278                    xalpha = vmin + (vmax - vmin) * i / (len(alpha_grad) - 1)
1279                    # Create transfer mapping scalar value to gradient opacity
1280                    gotf.AddPoint(xalpha, al)
1281            elif len(alpha_grad.shape) == 2:  # user passing [(x0,alpha0), ...]
1282                gotf.AddPoint(vmin, alpha_grad[0][1])
1283                for xalpha, al in alpha_grad:
1284                    # Create transfer mapping scalar value to opacity
1285                    gotf.AddPoint(xalpha, al)
1286                gotf.AddPoint(vmax, alpha_grad[-1][1])
1287            # print("alpha_grad at", round(xalpha, 1), "\tset to", al)
1288        else:
1289            gotf.AddPoint(vmin, alpha_grad)  # constant alpha_grad
1290            gotf.AddPoint(vmax, alpha_grad)
1291        return self

Assign a set of transparencies to a volume's gradient along the range of the scalar value. A single constant value can also be assigned. The gradient function is used to decrease the opacity in the "flat" regions of the volume while maintaining the opacity at the boundaries between material types. The gradient is measured as the amount by which the intensity changes over unit distance.

The format for alpha_grad is the same as for method volume.alpha().
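
A short sketch combining a scalar opacity ramp with a gradient opacity ramp (the values are illustrative):

    from vedo import Volume, dataurl, show

    vol = Volume(dataurl + "embryo.tif")
    vol.alpha([0.0, 0.0, 0.5, 0.8, 1.0])        # opacity vs. scalar value
    vol.alpha_gradient([0.0, 0.3, 0.9, 1.0])    # opacity vs. scalar gradient
    show(vol, axes=1).close()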

def component_weight(self, i, weight):
1293    def component_weight(self, i, weight):
1294        """Set the scalar component weight in range [0,1]."""
1295        self.GetProperty().SetComponentWeight(i, weight)
1296        return self

Set the scalar component weight in range [0,1].

def xslice(self, i):
1298    def xslice(self, i):
1299        """Extract the slice at index `i` of volume along x-axis."""
1300        vslice = vtk.vtkImageDataGeometryFilter()
1301        vslice.SetInputData(self.imagedata())
1302        nx, ny, nz = self.imagedata().GetDimensions()
1303        if i > nx - 1:
1304            i = nx - 1
1305        vslice.SetExtent(i, i, 0, ny, 0, nz)
1306        vslice.Update()
1307        m = Mesh(vslice.GetOutput())
1308        m.pipeline = utils.OperationNode(f"xslice {i}", parents=[self], c="#4cc9f0:#e9c46a")
1309        return m

Extract the slice at index i of volume along x-axis.

def yslice(self, j):
1311    def yslice(self, j):
1312        """Extract the slice at index `j` of volume along y-axis."""
1313        vslice = vtk.vtkImageDataGeometryFilter()
1314        vslice.SetInputData(self.imagedata())
1315        nx, ny, nz = self.imagedata().GetDimensions()
1316        if j > ny - 1:
1317            j = ny - 1
1318        vslice.SetExtent(0, nx, j, j, 0, nz)
1319        vslice.Update()
1320        m = Mesh(vslice.GetOutput())
1321        m.pipeline = utils.OperationNode(f"yslice {j}", parents=[self], c="#4cc9f0:#e9c46a")
1322        return m

Extract the slice at index j of volume along y-axis.

def zslice(self, k):
1324    def zslice(self, k):
1325        """Extract the slice at index `k` of volume along z-axis."""
1326        vslice = vtk.vtkImageDataGeometryFilter()
1327        vslice.SetInputData(self.imagedata())
1328        nx, ny, nz = self.imagedata().GetDimensions()
1329        if k > nz - 1:
1330            k = nz - 1
1331        vslice.SetExtent(0, nx, 0, ny, k, k)
1332        vslice.Update()
1333        m = Mesh(vslice.GetOutput())
1334        m.pipeline = utils.OperationNode(f"zslice {k}", parents=[self], c="#4cc9f0:#e9c46a")
1335        return m

Extract the slice at index k of volume along z-axis.
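
For instance, the three orthogonal mid-slices of a volume can be extracted and shown together (a sketch; the dataset and colormap are illustrative and the scalars are assumed to be color-mappable on the resulting meshes):

    from vedo import Volume, dataurl, show

    vol = Volume(dataurl + "embryo.tif")
    nx, ny, nz = vol.dimensions()
    sx = vol.xslice(nx // 2).cmap("bone_r")
    sy = vol.yslice(ny // 2).cmap("bone_r")
    sz = vol.zslice(nz // 2).cmap("bone_r")
    show(sx, sy, sz, axes=1).close()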

def slice_plane(self, origin=(0, 0, 0), normal=(1, 1, 1), autocrop=False):
1337    def slice_plane(self, origin=(0, 0, 0), normal=(1, 1, 1), autocrop=False):
1338        """
1339        Extract the slice along a given plane position and normal.
1340
1341        Example:
1342            - [slice_plane1.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/slice_plane1.py)
1343
1344                ![](https://vedo.embl.es/images/volumetric/slicePlane1.gif)
1345        """
1346        reslice = vtk.vtkImageReslice()
1347        reslice.SetInputData(self._data)
1348        reslice.SetOutputDimensionality(2)
1349        newaxis = utils.versor(normal)
1350        pos = np.array(origin)
1351        initaxis = (0, 0, 1)
1352        crossvec = np.cross(initaxis, newaxis)
1353        angle = np.arccos(np.dot(initaxis, newaxis))
1354        T = vtk.vtkTransform()
1355        T.PostMultiply()
1356        T.RotateWXYZ(np.rad2deg(angle), crossvec)
1357        T.Translate(pos)
1358        M = T.GetMatrix()
1359        reslice.SetResliceAxes(M)
1360        reslice.SetInterpolationModeToLinear()
1361        reslice.SetAutoCropOutput(not autocrop)
1362        reslice.Update()
1363        vslice = vtk.vtkImageDataGeometryFilter()
1364        vslice.SetInputData(reslice.GetOutput())
1365        vslice.Update()
1366        msh = Mesh(vslice.GetOutput())
1367        msh.SetOrientation(T.GetOrientation())
1368        msh.SetPosition(pos)
1369        msh.pipeline = utils.OperationNode("slice_plane", parents=[self], c="#4cc9f0:#e9c46a")
1370        return msh

Extract the slice along a given plane position and normal.

Example:
  • slice_plane1.py (https://github.com/marcomusy/vedo/tree/master/examples/volumetric/slice_plane1.py)
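
A minimal sketch, slicing through the middle of the bounding box along a diagonal direction (the dataset and colormap are illustrative):

    from vedo import Volume, dataurl, show

    vol = Volume(dataurl + "embryo.tif")
    b = vol.bounds()
    center = [(b[0] + b[1]) / 2, (b[2] + b[3]) / 2, (b[4] + b[5]) / 2]
    msh = vol.slice_plane(origin=center, normal=(1, 1, 1)).cmap("bone_r")
    show(msh, axes=1).close()
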
def warp(self, source, target, sigma=1, mode='3d', fit=False):
1372    def warp(self, source, target, sigma=1, mode="3d", fit=False):
1373        """
1374        Warp volume scalars within a Volume by specifying
1375        source and target sets of points.
1376
1377        Arguments:
1378            source : (Points, list)
1379                the list of source points
1380            target : (Points, list)
1381                the list of target points
1382            fit : (bool)
1383                fit/adapt the old bounding box to the warped geometry
1384        """
1385        if isinstance(source, vedo.Points):
1386            source = source.points()
1387        if isinstance(target, vedo.Points):
1388            target = target.points()
1389
1390        ns = len(source)
1391        ptsou = vtk.vtkPoints()
1392        ptsou.SetNumberOfPoints(ns)
1393        for i in range(ns):
1394            ptsou.SetPoint(i, source[i])
1395
1396        nt = len(target)
1397        if ns != nt:
1398            vedo.logger.error(f"#source {ns} != {nt} #target points")
1399            raise RuntimeError()
1400
1401        pttar = vtk.vtkPoints()
1402        pttar.SetNumberOfPoints(nt)
1403        for i in range(ns):
1404            pttar.SetPoint(i, target[i])
1405
1406        T = vtk.vtkThinPlateSplineTransform()
1407        if mode.lower() == "3d":
1408            T.SetBasisToR()
1409        elif mode.lower() == "2d":
1410            T.SetBasisToR2LogR()
1411        else:
1412            vedo.logger.error(f"unknown mode {mode}")
1413            raise RuntimeError()
1414
1415        T.SetSigma(sigma)
1416        T.SetSourceLandmarks(ptsou)
1417        T.SetTargetLandmarks(pttar)
1418        T.Inverse()
1419        self.transform = T
1420        self.apply_transform(T, fit=fit)
1421        self.pipeline = utils.OperationNode("warp", parents=[self], c="#4cc9f0")
1422        return self

Warp volume scalars within a Volume by specifying source and target sets of points.

Arguments:
  • source : (Points, list) the list of source points
  • target : (Points, list) the list of target points
  • fit : (bool) fit/adapt the old bounding box to the warped geometry
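
A hedged sketch of the idea, using the eight corners of the bounding box as landmarks and displacing one of them (the dataset and the displacement are arbitrary):

    from vedo import Volume, dataurl, show

    vol = Volume(dataurl + "embryo.tif")
    x0, x1, y0, y1, z0, z1 = vol.bounds()
    source = [[x, y, z] for x in (x0, x1) for y in (y0, y1) for z in (z0, z1)]
    target = [list(p) for p in source]
    target[0] = [x0 + 50, y0 + 50, z0 + 50]   # move one corner inwards
    vol.warp(source, target, sigma=1, mode="3d")
    show(vol, axes=1).close()
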
def apply_transform(self, T, fit=False):
1424    def apply_transform(self, T, fit=False):
1425        """
1426        Apply a transform to the scalars in the volume.
1427
1428        Arguments:
1429            T : (vtkTransform, matrix)
1430                The transformation to be applied
1431            fit : (bool)
1432                fit/adapt the old bounding box to the warped geometry
1433        """
1434        if isinstance(T, vtk.vtkMatrix4x4):
1435            tr = vtk.vtkTransform()
1436            tr.SetMatrix(T)
1437            T = tr
1438
1439        elif utils.is_sequence(T):
1440            M = vtk.vtkMatrix4x4()
1441            n = len(T[0])
1442            for i in range(n):
1443                for j in range(n):
1444                    M.SetElement(i, j, T[i][j])
1445            tr = vtk.vtkTransform()
1446            tr.SetMatrix(M)
1447            T = tr
1448
1449        reslice = vtk.vtkImageReslice()
1450        reslice.SetInputData(self._data)
1451        reslice.SetResliceTransform(T)
1452        reslice.SetOutputDimensionality(3)
1453        reslice.SetInterpolationModeToLinear()
1454
1455        spacing = self._data.GetSpacing()
1456        origin = self._data.GetOrigin()
1457
1458        if fit:
1459            bb = self.box()
1460            if isinstance(T, vtk.vtkThinPlateSplineTransform):
1461                TI = vtk.vtkThinPlateSplineTransform()
1462                TI.DeepCopy(T)
1463                TI.Inverse()
1464            else:
1465                TI = vtk.vtkTransform()
1466                TI.DeepCopy(T)
1467            bb.apply_transform(TI)
1468            bounds = bb.GetBounds()
1469            bounds = (
1470                bounds[0] / spacing[0],
1471                bounds[1] / spacing[0],
1472                bounds[2] / spacing[1],
1473                bounds[3] / spacing[1],
1474                bounds[4] / spacing[2],
1475                bounds[5] / spacing[2],
1476            )
1477            bounds = np.round(bounds).astype(int)
1478            reslice.SetOutputExtent(bounds)
1479            reslice.SetOutputSpacing(spacing[0], spacing[1], spacing[2])
1480            reslice.SetOutputOrigin(origin[0], origin[1], origin[2])
1481
1482        reslice.Update()
1483        self._update(reslice.GetOutput())
1484        self.pipeline = utils.OperationNode("apply_transform", parents=[self], c="#4cc9f0")
1485        return self

Apply a transform to the scalars in the volume.

Arguments:
  • T : (vtkTransform, matrix) The transformation to be applied
  • fit : (bool) fit/adapt the old bounding box to the warped geometry
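
A minimal sketch passing a plain 4x4 homogeneous matrix, here a 90-degree rotation about z plus a translation (the dataset and the matrix values are arbitrary):

    from vedo import Volume, dataurl, show

    vol = Volume(dataurl + "embryo.tif")
    T = [[0, -1, 0, 50],
         [1,  0, 0,  0],
         [0,  0, 1,  0],
         [0,  0, 0,  1]]
    vol.apply_transform(T)
    show(vol, axes=1).close()
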
class VolumeSlice(BaseVolume, vedo.base.Base3DProp, vtkmodules.vtkRenderingCore.vtkImageSlice):
1489class VolumeSlice(BaseVolume, Base3DProp, vtk.vtkImageSlice):
1490    """
1491    Derived class of `vtkImageSlice`.
1492    """
1493
1494    def __init__(self, inputobj=None):
1495        """
1496        This class is equivalent to `Volume` except for its representation.
1497        The main purpose of this class is to be used in conjunction with `Volume`
1498        for visualization using `mode="image"`.
1499        """
1500        vtk.vtkImageSlice.__init__(self)
1501        Base3DProp.__init__(self)
1502        BaseVolume.__init__(self)
1503        # super().__init__()
1504
1505        self._mapper = vtk.vtkImageResliceMapper()
1506        self._mapper.SliceFacesCameraOn()
1507        self._mapper.SliceAtFocalPointOn()
1508        self._mapper.SetAutoAdjustImageQuality(False)
1509        self._mapper.BorderOff()
1510
1511        self.lut = None
1512
1513        self.property = vtk.vtkImageProperty()
1514        self.property.SetInterpolationTypeToLinear()
1515        self.SetProperty(self.property)
1516
1517        ###################
1518        if isinstance(inputobj, str):
1519            if "https://" in inputobj:
1520                inputobj = vedo.file_io.download(inputobj, verbose=False)  # fpath
1521            elif os.path.isfile(inputobj):
1522                pass
1523            else:
1524                inputobj = sorted(glob.glob(inputobj))
1525
1526        ###################
1527        inputtype = str(type(inputobj))
1528
1529        if inputobj is None:
1530            img = vtk.vtkImageData()
1531
1532        if isinstance(inputobj, Volume):
1533            img = inputobj.imagedata()
1534            self.lut = utils.ctf2lut(inputobj)
1535
1536        elif utils.is_sequence(inputobj):
1537
1538            if isinstance(inputobj[0], str):  # scan sequence of BMP files
1539                ima = vtk.vtkImageAppend()
1540                ima.SetAppendAxis(2)
1541                pb = utils.ProgressBar(0, len(inputobj))
1542                for i in pb.range():
1543                    f = inputobj[i]
1544                    picr = vtk.vtkBMPReader()
1545                    picr.SetFileName(f)
1546                    picr.Update()
1547                    mgf = vtk.vtkImageMagnitude()
1548                    mgf.SetInputData(picr.GetOutput())
1549                    mgf.Update()
1550                    ima.AddInputData(mgf.GetOutput())
1551                    pb.print("loading...")
1552                ima.Update()
1553                img = ima.GetOutput()
1554
1555            else:
1556                if "ndarray" not in inputtype:
1557                    inputobj = np.array(inputobj)
1558
1559                if len(inputobj.shape) == 1:
1560                    varr = utils.numpy2vtk(inputobj, dtype=float)
1561                else:
1562                    if len(inputobj.shape) > 2:
1563                        inputobj = np.transpose(inputobj, axes=[2, 1, 0])
1564                    varr = utils.numpy2vtk(inputobj.ravel(order="F"), dtype=float)
1565                varr.SetName("input_scalars")
1566
1567                img = vtk.vtkImageData()
1568                img.SetDimensions(inputobj.shape)
1569                img.GetPointData().AddArray(varr)
1570                img.GetPointData().SetActiveScalars(varr.GetName())
1571
1572        elif "ImageData" in inputtype:
1573            img = inputobj
1574
1575        elif isinstance(inputobj, Volume):
1576            img = inputobj.inputdata()
1577
1578        elif "UniformGrid" in inputtype:
1579            img = inputobj
1580
1581        elif hasattr(inputobj, "GetOutput"):  # passing a vtk object, try to extract imagedata
1582            if hasattr(inputobj, "Update"):
1583                inputobj.Update()
1584            img = inputobj.GetOutput()
1585
1586        elif isinstance(inputobj, str):
1587            if "https://" in inputobj:
1588                inputobj = vedo.file_io.download(inputobj, verbose=False)
1589            img = vedo.file_io.loadImageData(inputobj)
1590
1591        else:
1592            vedo.logger.error(f"cannot understand input type {inputtype}")
1593            return
1594
1595        self._data = img
1596        self._mapper.SetInputData(img)
1597        self.SetMapper(self._mapper)
1598
1599    def bounds(self):
1600        """Return the bounding box as [x0,x1, y0,y1, z0,z1]"""
1601        bns = [0, 0, 0, 0, 0, 0]
1602        self.GetBounds(bns)
1603        return bns
1604
1605    def colorize(self, lut=None, fix_scalar_range=False):
1606        """
1607        Assign a LUT (Look Up Table) to colorize the slice, leave it `None`
1608        to reuse an existing Volume color map.
1609        Use "bw" for automatic black and white.
1610        """
1611        if lut is None and self.lut:
1612            self.property.SetLookupTable(self.lut)
1613        elif isinstance(lut, vtk.vtkLookupTable):
1614            self.property.SetLookupTable(lut)
1615        elif lut == "bw":
1616            self.property.SetLookupTable(None)
1617        self.property.SetUseLookupTableScalarRange(fix_scalar_range)
1618        return self
1619
1620    def alpha(self, value):
1621        """Set the opacity of the slice."""
1622        self.property.SetOpacity(value)
1623        return self
1624
1625    def auto_adjust_quality(self, value=True):
1626        """Automatically reduce the rendering quality for greater speed when interacting"""
1627        self._mapper.SetAutoAdjustImageQuality(value)
1628        return self
1629
1630    def slab(self, thickness=0, mode=0, sample_factor=2):
1631        """
1632        Make a thick slice (slab).
1633
1634        Arguments:
1635            thickness : (float)
1636                set the slab thickness, for thick slicing
1637            mode : (int)
1638                The slab type:
1639                    0 = min
1640                    1 = max
1641                    2 = mean
1642                    3 = sum
1643            sample_factor : (float)
1644                Set the number of slab samples to use as a factor of the number of input slices
1645                within the slab thickness. The default value is 2, but 1 will increase speed
1646                with very little loss of quality.
1647        """
1648        self._mapper.SetSlabThickness(thickness)
1649        self._mapper.SetSlabType(mode)
1650        self._mapper.SetSlabSampleFactor(sample_factor)
1651        return self
1652
1653    def face_camera(self, value=True):
1654        """Make the slice always face the camera or not."""
1655        self._mapper.SetSliceFacesCamera(value)
1656        return self
1657
1658    def jump_to_nearest_slice(self, value=True):
1659        """
1660        This causes the slicing to occur at the closest slice to the focal point,
1661        instead of the default behavior where a new slice is interpolated between
1662        the original slices.
1663        Nothing happens if the plane is oblique to the original slices."""
1664        self._mapper.SetJumpToNearestSlice(value)
1665        return self
1666
1667    def fill_background(self, value=True):
1668        """
1669        Instead of rendering only to the image border,
1670        render out to the viewport boundary with the background color.
1671        The background color will be the lowest color on the lookup
1672        table that is being used for the image."""
1673        self._mapper.SetBackground(value)
1674        return self
1675
1676    def lighting(self, window, level, ambient=1.0, diffuse=0.0):
1677        """Assign the values for window and color level."""
1678        self.property.SetColorWindow(window)
1679        self.property.SetColorLevel(level)
1680        self.property.SetAmbient(ambient)
1681        self.property.SetDiffuse(diffuse)
1682        return self

Derived class of vtkImageSlice.

VolumeSlice(inputobj=None)
1494    def __init__(self, inputobj=None):
1495        """
1496        This class is equivalent to `Volume` except for its representation.
1497        The main purpose of this class is to be used in conjunction with `Volume`
1498        for visualization using `mode="image"`.
1499        """
1500        vtk.vtkImageSlice.__init__(self)
1501        Base3DProp.__init__(self)
1502        BaseVolume.__init__(self)
1503        # super().__init__()
1504
1505        self._mapper = vtk.vtkImageResliceMapper()
1506        self._mapper.SliceFacesCameraOn()
1507        self._mapper.SliceAtFocalPointOn()
1508        self._mapper.SetAutoAdjustImageQuality(False)
1509        self._mapper.BorderOff()
1510
1511        self.lut = None
1512
1513        self.property = vtk.vtkImageProperty()
1514        self.property.SetInterpolationTypeToLinear()
1515        self.SetProperty(self.property)
1516
1517        ###################
1518        if isinstance(inputobj, str):
1519            if "https://" in inputobj:
1520                inputobj = vedo.file_io.download(inputobj, verbose=False)  # fpath
1521            elif os.path.isfile(inputobj):
1522                pass
1523            else:
1524                inputobj = sorted(glob.glob(inputobj))
1525
1526        ###################
1527        inputtype = str(type(inputobj))
1528
1529        if inputobj is None:
1530            img = vtk.vtkImageData()
1531
1532        if isinstance(inputobj, Volume):
1533            img = inputobj.imagedata()
1534            self.lut = utils.ctf2lut(inputobj)
1535
1536        elif utils.is_sequence(inputobj):
1537
1538            if isinstance(inputobj[0], str):  # scan sequence of BMP files
1539                ima = vtk.vtkImageAppend()
1540                ima.SetAppendAxis(2)
1541                pb = utils.ProgressBar(0, len(inputobj))
1542                for i in pb.range():
1543                    f = inputobj[i]
1544                    picr = vtk.vtkBMPReader()
1545                    picr.SetFileName(f)
1546                    picr.Update()
1547                    mgf = vtk.vtkImageMagnitude()
1548                    mgf.SetInputData(picr.GetOutput())
1549                    mgf.Update()
1550                    ima.AddInputData(mgf.GetOutput())
1551                    pb.print("loading...")
1552                ima.Update()
1553                img = ima.GetOutput()
1554
1555            else:
1556                if "ndarray" not in inputtype:
1557                    inputobj = np.array(inputobj)
1558
1559                if len(inputobj.shape) == 1:
1560                    varr = utils.numpy2vtk(inputobj, dtype=float)
1561                else:
1562                    if len(inputobj.shape) > 2:
1563                        inputobj = np.transpose(inputobj, axes=[2, 1, 0])
1564                    varr = utils.numpy2vtk(inputobj.ravel(order="F"), dtype=float)
1565                varr.SetName("input_scalars")
1566
1567                img = vtk.vtkImageData()
1568                img.SetDimensions(inputobj.shape)
1569                img.GetPointData().AddArray(varr)
1570                img.GetPointData().SetActiveScalars(varr.GetName())
1571
1572        elif "ImageData" in inputtype:
1573            img = inputobj
1574
1575        elif isinstance(inputobj, Volume):
1576            img = inputobj.inputdata()
1577
1578        elif "UniformGrid" in inputtype:
1579            img = inputobj
1580
1581        elif hasattr(inputobj, "GetOutput"):  # passing a vtk object, try to extract imagedata
1582            if hasattr(inputobj, "Update"):
1583                inputobj.Update()
1584            img = inputobj.GetOutput()
1585
1586        elif isinstance(inputobj, str):
1587            if "https://" in inputobj:
1588                inputobj = vedo.file_io.download(inputobj, verbose=False)
1589            img = vedo.file_io.loadImageData(inputobj)
1590
1591        else:
1592            vedo.logger.error(f"cannot understand input type {inputtype}")
1593            return
1594
1595        self._data = img
1596        self._mapper.SetInputData(img)
1597        self.SetMapper(self._mapper)

This class is equivalent to Volume except for its representation. The main purpose of this class is to be used in conjunction with Volume for visualization using mode="image".
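
A brief sketch, building the slice from an existing Volume so that it inherits that Volume's color map as a lookup table (the dataset is illustrative):

    from vedo import dataurl, show
    from vedo.volume import Volume, VolumeSlice

    vol = Volume(dataurl + "embryo.tif")
    vslice = VolumeSlice(vol)   # picks up the Volume's color transfer function as a LUT
    vslice.colorize()           # apply the inherited LUT
    show(vslice, axes=1).close()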

def bounds(self):
1599    def bounds(self):
1600        """Return the bounding box as [x0,x1, y0,y1, z0,z1]"""
1601        bns = [0, 0, 0, 0, 0, 0]
1602        self.GetBounds(bns)
1603        return bns

Return the bounding box as [x0,x1, y0,y1, z0,z1]

def colorize(self, lut=None, fix_scalar_range=False):
1605    def colorize(self, lut=None, fix_scalar_range=False):
1606        """
1607        Assign a LUT (Look Up Table) to colorize the slice, leave it `None`
1608        to reuse an existing Volume color map.
1609        Use "bw" for automatic black and white.
1610        """
1611        if lut is None and self.lut:
1612            self.property.SetLookupTable(self.lut)
1613        elif isinstance(lut, vtk.vtkLookupTable):
1614            self.property.SetLookupTable(lut)
1615        elif lut == "bw":
1616            self.property.SetLookupTable(None)
1617        self.property.SetUseLookupTableScalarRange(fix_scalar_range)
1618        return self

Assign a LUT (Look Up Table) to colorize the slice, leave it None to reuse an existing Volume color map. Use "bw" for automatic black and white.

def alpha(self, value):
1620    def alpha(self, value):
1621        """Set the opacity of the slice."""
1622        self.property.SetOpacity(value)
1623        return self

Set the opacity of the slice.

def auto_adjust_quality(self, value=True):
1625    def auto_adjust_quality(self, value=True):
1626        """Automatically reduce the rendering quality for greater speed when interacting"""
1627        self._mapper.SetAutoAdjustImageQuality(value)
1628        return self

Automatically reduce the rendering quality for greater speed when interacting

def slab(self, thickness=0, mode=0, sample_factor=2):
1630    def slab(self, thickness=0, mode=0, sample_factor=2):
1631        """
1632        Make a thick slice (slab).
1633
1634        Arguments:
1635            thickness : (float)
1636                set the slab thickness, for thick slicing
1637            mode : (int)
1638                The slab type:
1639                    0 = min
1640                    1 = max
1641                    2 = mean
1642                    3 = sum
1643            sample_factor : (float)
1644                Set the number of slab samples to use as a factor of the number of input slices
1645                within the slab thickness. The default value is 2, but 1 will increase speed
1646                with very little loss of quality.
1647        """
1648        self._mapper.SetSlabThickness(thickness)
1649        self._mapper.SetSlabType(mode)
1650        self._mapper.SetSlabSampleFactor(sample_factor)
1651        return self

Make a thick slice (slab).

Arguments:
  • thickness : (float) set the slab thickness, for thick slicing
  • mode : (int) the slab type: 0 = min, 1 = max, 2 = mean, 3 = sum
  • sample_factor : (float) Set the number of slab samples to use as a factor of the number of input slices within the slab thickness. The default value is 2, but 1 will increase speed with very little loss of quality.
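
For instance (a sketch; the thickness is in world coordinates and the value is arbitrary):

    from vedo import dataurl, show
    from vedo.volume import Volume, VolumeSlice

    vslice = VolumeSlice(Volume(dataurl + "embryo.tif")).colorize()
    vslice.slab(thickness=10, mode=2)   # 10-unit thick slab, mean projection
    show(vslice, axes=1).close()
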
def face_camera(self, value=True):
1653    def face_camera(self, value=True):
1654        """Make the slice always face the camera or not."""
1655        self._mapper.SetSliceFacesCamera(value)
1656        return self

Make the slice always face the camera or not.

def jump_to_nearest_slice(self, value=True):
1658    def jump_to_nearest_slice(self, value=True):
1659        """
1660        This causes the slicing to occur at the closest slice to the focal point,
1661        instead of the default behavior where a new slice is interpolated between
1662        the original slices.
1663        Nothing happens if the plane is oblique to the original slices."""
1664        self._mapper.SetJumpToNearestSlice(value)
1665        return self

This causes the slicing to occur at the closest slice to the focal point, instead of the default behavior where a new slice is interpolated between the original slices. Nothing happens if the plane is oblique to the original slices.

def fill_background(self, value=True):
1667    def fill_background(self, value=True):
1668        """
1669        Instead of rendering only to the image border,
1670        render out to the viewport boundary with the background color.
1671        The background color will be the lowest color on the lookup
1672        table that is being used for the image."""
1673        self._mapper.SetBackground(value)
1674        return self

Instead of rendering only to the image border, render out to the viewport boundary with the background color. The background color will be the lowest color on the lookup table that is being used for the image.

def lighting(self, window, level, ambient=1.0, diffuse=0.0):
1676    def lighting(self, window, level, ambient=1.0, diffuse=0.0):
1677        """Assign the values for window and color level."""
1678        self.property.SetColorWindow(window)
1679        self.property.SetColorLevel(level)
1680        self.property.SetAmbient(ambient)
1681        self.property.SetDiffuse(diffuse)
1682        return self

Assign the values for window and color level.
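
As a usage sketch (the values below assume 8-bit scalars): window is the width of the scalar interval mapped onto the gray ramp, and level is the scalar value mapped to mid-gray.

    from vedo import dataurl, show
    from vedo.volume import Volume, VolumeSlice

    vslice = VolumeSlice(Volume(dataurl + "embryo.tif")).colorize("bw")
    vslice.lighting(window=255, level=128)   # full 8-bit range, centered at mid-gray
    show(vslice, axes=1).close()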