vedo.volume

Work with volumetric datasets (voxel data).

   1import glob
   2import os
   3from deprecated import deprecated
   4
   5import numpy as np
   6
   7try:
   8    import vedo.vtkclasses as vtk
   9except ImportError:
  10    import vtkmodules.all as vtk
  11
  12import vedo
  13from vedo import utils
  14from vedo.base import Base3DProp
  15from vedo.base import BaseGrid
  16from vedo.mesh import Mesh
  17
  18
  19__docformat__ = "google"
  20
  21__doc__ = """
  22Work with volumetric datasets (voxel data).
  23
  24![](https://vedo.embl.es/images/volumetric/slicePlane2.png)
  25"""
  26
  27__all__ = [
  28    "BaseVolume",  # included to generate documentation in pydoc
  29    "Volume",
  30    "VolumeSlice",
  31]
  32
  33
  34##########################################################################
  35class BaseVolume:
  36    """
  37    Base class. Do not instantiate.
  38    """
  39    def __init__(self):
  40        """Base class. Do not instantiate."""
  41
  42        self._data = None
  43        self._mapper = None
  44        self.transform = None
  45
  46    def _repr_html_(self):
  47        """
  48        HTML representation of the Volume object for Jupyter Notebooks.
  49        
  50        Returns:
  51            HTML text with the image and some properties.
  52        """
  53        import io
  54        import base64
  55        from PIL import Image
  56
  57        library_name = "vedo.volume.Volume"
  58        help_url = "https://vedo.embl.es/docs/vedo/volume.html"
  59
  60        arr = self.thumbnail(azimuth=0, elevation=-60, zoom=1.4, axes=True)
  61
  62        im = Image.fromarray(arr)
  63        buffered = io.BytesIO()
  64        im.save(buffered, format="PNG", quality=100)
  65        encoded = base64.b64encode(buffered.getvalue()).decode("utf-8")
  66        url = "data:image/png;base64," + encoded
  67        image = f"<img src='{url}'></img>"
  68
  69        # statistics
  70        bounds = "<br/>".join(
  71            [
  72                utils.precision(min_x,4) + " ... " + utils.precision(max_x,4)
  73                for min_x, max_x in zip(self.bounds()[::2], self.bounds()[1::2])
  74            ]
  75        )
  76
  77        help_text = ""
  78        if self.name:
  79            help_text += f"<b> {self.name}: &nbsp&nbsp</b>"
  80        help_text += '<b><a href="' + help_url + '" target="_blank">' + library_name + "</a></b>" 
  81        if self.filename:
  82            dots = ""
  83            if len(self.filename) > 30:
  84                dots = "..."
  85            help_text += f"<br/><code><i>({dots}{self.filename[-30:]})</i></code>"
  86        
  87        pdata = ""
  88        if self._data.GetPointData().GetScalars():
  89            if self._data.GetPointData().GetScalars().GetName():
  90                name = self._data.GetPointData().GetScalars().GetName()
  91                pdata = "<tr><td><b> point data array </b></td><td>" + name + "</td></tr>"
  92
  93        cdata = ""
  94        if self._data.GetCellData().GetScalars():
  95            if self._data.GetCellData().GetScalars().GetName():
  96                name = self._data.GetCellData().GetScalars().GetName()
  97                cdata = "<tr><td><b> voxel data array </b></td><td>" + name + "</td></tr>"
  98
  99        img = self.GetMapper().GetInput()
 100
 101        all = [
 102            "<table>",
 103            "<tr>", 
 104            "<td>", image, "</td>",
 105            "<td style='text-align: center; vertical-align: center;'><br/>", help_text,
 106            "<table>",
 107            "<tr><td><b> bounds </b> <br/> (x/y/z) </td><td>" + str(bounds) + "</td></tr>",
 108            "<tr><td><b> dimensions </b></td><td>" + str(img.GetDimensions()) + "</td></tr>",
 109            "<tr><td><b> voxel spacing </b></td><td>" + utils.precision(img.GetSpacing(),3) + "</td></tr>",
 110            "<tr><td><b> in memory size </b></td><td>" + str(int(img.GetActualMemorySize() / 1024)) + "MB</td></tr>",
 111            pdata,
 112            cdata,
 113            "<tr><td><b> scalar range </b></td><td>" + utils.precision(img.GetScalarRange(), 4) + "</td></tr>",
 114            "</table>",
 115            "</table>",
 116        ]
 117        return "\n".join(all)
 118
 119    def _update(self, img):
 120        self._data = img
 121        self._data.GetPointData().Modified()
 122        self._mapper.SetInputData(img)
 123        self._mapper.Modified()
 124        self._mapper.Update()
 125        return self
 126
 127    def clone(self):
 128        """Return a clone copy of the Volume."""
 129        newimg = vtk.vtkImageData()
 130        newimg.CopyStructure(self._data)
 131        newimg.CopyAttributes(self._data)
 132
 133        newvol = Volume(newimg)
 134        prop = vtk.vtkVolumeProperty()
 135        prop.DeepCopy(self.GetProperty())
 136        newvol.SetProperty(prop)
 137        newvol.SetOrigin(self.GetOrigin())
 138        newvol.SetScale(self.GetScale())
 139        newvol.SetOrientation(self.GetOrientation())
 140        newvol.SetPosition(self.GetPosition())
 141        newvol.pipeline = utils.OperationNode(
 142            "clone", parents=[self], c="#bbd0ff", shape="diamond")
 143        return newvol
 144
 145    def imagedata(self):
 146        """Return the underlying `vtkImagaData` object."""
 147        return self._data
 148
 149    def tonumpy(self):
 150        """
 151        Get read-write access to voxels of a Volume object as a numpy array.
 152
 153        When assigning new values to the returned array, use the [:] operator
 154        so that numpy writes into the existing buffer instead of reallocating a new array.
 155
 156        Example:
 157            `arr[:] = arr*2 + 15`
 158
 159        If you modify the array, call
 160        `volume.imagedata().GetPointData().GetScalars().Modified()`
 161        once all your modifications are completed.
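
            A complete round-trip sketch (the input array is illustrative):

            ```python
            import numpy as np
            from vedo import Volume

            vol = Volume(np.random.rand(10, 20, 30))  # illustrative input data
            arr = vol.tonumpy()      # writable view on the voxel buffer
            arr[:] = arr * 2 + 15    # modify in place, no reallocation
            vol.imagedata().GetPointData().GetScalars().Modified()
            ```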
 162        """
 163        narray_shape = tuple(reversed(self._data.GetDimensions()))
 164
 165        scals = self._data.GetPointData().GetScalars()
 166        comps = scals.GetNumberOfComponents()
 167        if comps == 1:
 168            narray = utils.vtk2numpy(scals).reshape(narray_shape)
 169            narray = np.transpose(narray, axes=[2, 1, 0])
 170        else:
 171            narray = utils.vtk2numpy(scals).reshape(*narray_shape, comps)
 172            narray = np.transpose(narray, axes=[2, 1, 0, 3])
 173
 174        # narray = utils.vtk2numpy(self._data.GetPointData().GetScalars()).reshape(narray_shape)
 175        # narray = np.transpose(narray, axes=[2, 1, 0])
 176
 177        return narray
 178
 179    def dimensions(self):
 180        """Return the nr. of voxels in the 3 dimensions."""
 181        return np.array(self._data.GetDimensions())
 182
 183    @deprecated(reason=vedo.colors.red + "Please use scalar_range()" + vedo.colors.reset)
 184    def scalarRange(self):
 185        "Deprecated. Please use `scalar_range()`."
 186        return self.scalar_range()
 187
 188    def scalar_range(self):
 189        """Return the range of the scalar values."""
 190        return np.array(self._data.GetScalarRange())
 191
 192    def spacing(self, s=None):
 193        """Set/get the voxels size in the 3 dimensions."""
 194        if s is not None:
 195            self._data.SetSpacing(s)
 196            return self
 197        return np.array(self._data.GetSpacing())
 198
 199    # def pos(self, p=None): # conflicts with API of base.x()
 200    #     """Same effect as calling `origin()`."""
 201    #     return self.origin(p)
 202
 203    def origin(self, s=None):
 204        """Set/get the origin of the volumetric dataset."""
 205        ### supersedes base.origin()
 206        ### DIFFERENT from base.origin()!
 207        if s is not None:
 208            self._data.SetOrigin(s)
 209            return self
 210        return np.array(self._data.GetOrigin())
 211
 212    def center(self, p=None):
 213        """Set/get the center of the volumetric dataset."""
 214        if p is not None:
 215            self._data.SetCenter(p)
 216            return self
 217        return np.array(self._data.GetCenter())
 218
 219    def permute_axes(self, x, y, z):
 220        """
 221        Reorder the axes of the Volume by specifying
 222        which input axes should become the new X, Y and Z.
 223        """
 224        imp = vtk.vtkImagePermute()
 225        imp.SetFilteredAxes(x, y, z)
 226        imp.SetInputData(self.imagedata())
 227        imp.Update()
 228        self._update(imp.GetOutput())
 229        self.pipeline = utils.OperationNode(
 230            f"permute_axes\n{tuple(x,y,z)}", parents=[self], c="#4cc9f0")
 231        return self
 232
 233    def resample(self, new_spacing, interpolation=1):
 234        """
 235        Resamples a `Volume` to be larger or smaller.
 236
 237        This method modifies the spacing of the input.
 238        Linear interpolation is used by default to resample the data.
 239
 240        Arguments:
 241            new_spacing : (list)
 242                a list of 3 new spacings for the 3 axes
 243            interpolation : (int)
 244                0=nearest_neighbor, 1=linear, 2=cubic
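
        Example:
            A small usage sketch (the spacing values are illustrative):
            ```python
            from vedo import Volume, dataurl

            vol = Volume(dataurl + "embryo.slc")
            print(vol.spacing())           # current voxel size
            vol.resample([2.0, 2.0, 2.0])  # resample to a coarser grid
            print(vol.spacing())
            ```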
 245        """
 246        rsp = vtk.vtkImageResample()
            rsp.SetInputData(self.imagedata())  # connect the input image to the filter
 247        oldsp = self.spacing()
 248        for i in range(3):
 249            if oldsp[i] != new_spacing[i]:
 250                rsp.SetAxisOutputSpacing(i, new_spacing[i])
 251        rsp.InterpolateOn()
 252        rsp.SetInterpolationMode(interpolation)
 253        rsp.OptimizationOn()
 254        rsp.Update()
 255        self._update(rsp.GetOutput())
 256        self.pipeline = utils.OperationNode(
 257            f"resample\n{tuple(new_spacing)}", parents=[self], c="#4cc9f0")
 258        return self
 259
 260    def interpolation(self, itype):
 261        """
 262        Set interpolation type.
 263
 264        0=nearest neighbour, 1=linear
 265        """
 266        self.property.SetInterpolationType(itype)
 267        return self
 268
 269    def threshold(self, above=None, below=None, replace=None, replace_value=None):
 270        """
 271        Binary or continuous volume thresholding.
 272        Voxels whose value is selected by the `above`/`below` thresholds can be
 273        replaced with `replace`; the remaining voxels can be replaced with `replace_value`.
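
        Example:
            A minimal sketch (the threshold value is illustrative):
            ```python
            from vedo import Volume, dataurl

            vol = Volume(dataurl + "embryo.slc")
            # voxels with value <= 40 are set to zero, the rest is untouched
            vol.threshold(below=40, replace=0)
            ```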
 274        """
 275        th = vtk.vtkImageThreshold()
 276        th.SetInputData(self.imagedata())
 277
 278        # sanity checks
 279        if above is not None and below is not None:
 280            if above == below:
 281                return self
 282            if above > below:
 283                vedo.logger.warning("in volume.threshold(), above > below, skip.")
 284                return self
 285
 286        ## cases
 287        if below is not None and above is not None:
 288            th.ThresholdBetween(above, below)
 289
 290        elif above is not None:
 291            th.ThresholdByUpper(above)
 292
 293        elif below is not None:
 294            th.ThresholdByLower(below)
 295
 296        ##
 297        if replace is not None:
 298            th.SetReplaceIn(True)
 299            th.SetInValue(replace)
 300        else:
 301            th.SetReplaceIn(False)
 302
 303        if replace_value is not None:
 304            th.SetReplaceOut(True)
 305            th.SetOutValue(replace_value)
 306        else:
 307            th.SetReplaceOut(False)
 308
 309        th.Update()
 310        self._update(th.GetOutput())
 311        self.pipeline = utils.OperationNode("threshold", parents=[self], c="#4cc9f0")
 312        return self
 313
 314    def crop(
 315            self,
 316            left=None, right=None,
 317            back=None, front=None,
 318            bottom=None, top=None,
 319            VOI=()
 320        ):
 321        """
 322        Crop a `Volume` object.
 323
 324        Arguments:
 325            left : (float)
 326                fraction to crop from the left plane (negative x)
 327            right : (float)
 328                fraction to crop from the right plane (positive x)
 329            back : (float)
 330                fraction to crop from the back plane (negative y)
 331            front : (float)
 332                fraction to crop from the front plane (positive y)
 333            bottom : (float)
 334                fraction to crop from the bottom plane (negative z)
 335            top : (float)
 336                fraction to crop from the top plane (positive z)
 337            VOI : (list)
 338                extract Volume Of Interest expressed in voxel numbers
 339
 340        Example:
 341            `vol.crop(VOI=(xmin, xmax, ymin, ymax, zmin, zmax))  # all integer numbers`
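
            A fraction-based sketch (the fractions are illustrative):
            ```python
            from vedo import Volume, dataurl

            vol = Volume(dataurl + "embryo.slc")
            # drop 10% of the voxels on the left (x) and 20% on the top (z)
            vol.crop(left=0.1, top=0.2)
            ```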
 342        """
 343        extractVOI = vtk.vtkExtractVOI()
 344        extractVOI.SetInputData(self.imagedata())
 345
 346        if VOI:
 347            extractVOI.SetVOI(VOI)
 348        else:
 349            d = self.imagedata().GetDimensions()
 350            bx0, bx1, by0, by1, bz0, bz1 = 0, d[0]-1, 0, d[1]-1, 0, d[2]-1
 351            if left is not None:   bx0 = int((d[0]-1)*left)
 352            if right is not None:  bx1 = int((d[0]-1)*(1-right))
 353            if back is not None:   by0 = int((d[1]-1)*back)
 354            if front is not None:  by1 = int((d[1]-1)*(1-front))
 355            if bottom is not None: bz0 = int((d[2]-1)*bottom)
 356            if top is not None:    bz1 = int((d[2]-1)*(1-top))
 357            extractVOI.SetVOI(bx0, bx1, by0, by1, bz0, bz1)
 358        extractVOI.Update()
 359        self._update(extractVOI.GetOutput())
 360
 361        self.pipeline = utils.OperationNode(
 362            "crop", parents=[self], c="#4cc9f0",
 363            comment=f"dims={tuple(self.dimensions())}",
 364        )
 365        return self
 366
 367    def append(self, volumes, axis="z", preserve_extents=False):
 368        """
 369        Take the components from multiple inputs and merge them into one output.
 370        Except for the append axis, all inputs must have the same extent.
 371        All inputs must have the same number of scalar components.
 372        The output has the same origin and spacing as the first input.
 373        The origin and spacing of all other inputs are ignored.
 374        All inputs must have the same scalar type.
 375
 376        Arguments:
 377            axis : (int, str)
 378                axis expanded to hold the multiple images
 379            preserve_extents : (bool)
 380                if True, the extent of the inputs is used to place
 381                the image in the output. The whole extent of the output is the union of the input
 382                whole extents. Any portion of the output not covered by the inputs is set to zero.
 383                The origin and spacing are taken from the first input.
 384
 385        Example:
 386            ```python
 387            from vedo import Volume, dataurl
 388            vol = Volume(dataurl+'embryo.tif')
 389            vol.append(vol, axis='x').show().close()
 390            ```
 391            ![](https://vedo.embl.es/images/feats/volume_append.png)
 392        """
 393        ima = vtk.vtkImageAppend()
 394        ima.SetInputData(self.imagedata())
 395        if not utils.is_sequence(volumes):
 396            volumes = [volumes]
 397        for volume in volumes:
 398            if isinstance(volume, vtk.vtkImageData):
 399                ima.AddInputData(volume)
 400            else:
 401                ima.AddInputData(volume.imagedata())
 402        ima.SetPreserveExtents(preserve_extents)
 403        if axis == "x":
 404            axis = 0
 405        elif axis == "y":
 406            axis = 1
 407        elif axis == "z":
 408            axis = 2
 409        ima.SetAppendAxis(axis)
 410        ima.Update()
 411        self._update(ima.GetOutput())
 412        
 413        self.pipeline = utils.OperationNode(
 414            "append", parents=[self, *volumes], c="#4cc9f0",
 415            comment=f"dims={tuple(self.dimensions())}",
 416        )
 417        return self
 418
 419    def resize(self, *newdims):
 420        """Increase or reduce the number of voxels of a Volume with interpolation."""
 421        old_dims = np.array(self.imagedata().GetDimensions())
 422        old_spac = np.array(self.imagedata().GetSpacing())
 423        rsz = vtk.vtkImageResize()
 424        rsz.SetResizeMethodToOutputDimensions()
 425        rsz.SetInputData(self.imagedata())
 426        rsz.SetOutputDimensions(newdims)
 427        rsz.Update()
 428        self._data = rsz.GetOutput()
 429        new_spac = old_spac * old_dims / newdims  # keep aspect ratio
 430        self._data.SetSpacing(new_spac)
 431        self._update(self._data)
 432        self.pipeline = utils.OperationNode(
 433            "resize", parents=[self], c="#4cc9f0",
 434            comment=f"dims={tuple(self.dimensions())}",
 435        )
 436        return self
 437
 438    def normalize(self):
 439        """Normalize that scalar components for each point."""
 440        norm = vtk.vtkImageNormalize()
 441        norm.SetInputData(self.imagedata())
 442        norm.Update()
 443        self._update(norm.GetOutput())
 444        self.pipeline = utils.OperationNode("normalize", parents=[self], c="#4cc9f0")
 445        return self
 446
 447    def mirror(self, axis="x"):
 448        """
 449        Mirror flip along one of the Cartesian axes.
 450        """
 451        img = self.imagedata()
 452
 453        ff = vtk.vtkImageFlip()
 454        ff.SetInputData(img)
 455        if axis.lower() == "x":
 456            ff.SetFilteredAxis(0)
 457        elif axis.lower() == "y":
 458            ff.SetFilteredAxis(1)
 459        elif axis.lower() == "z":
 460            ff.SetFilteredAxis(2)
 461        else:
 462            vedo.logger.error("mirror axis must be set to either x, y or z")
 463            raise RuntimeError()
 464        ff.Update()
 465        self._update(ff.GetOutput())
 466        self.pipeline = utils.OperationNode(f"mirror {axis}", parents=[self], c="#4cc9f0")
 467        return self
 468
 469    def operation(self, operation, volume2=None):
 470        """
 471        Perform operations with `Volume` objects.
 472        Keyword `volume2` can be a constant float.
 473
 474        Possible operations are:
 475        ```
 476        +, -, /, 1/x, sin, cos, exp, log,
 477        abs, **2, sqrt, min, max, atan, atan2, median,
 478        mag, dot, gradient, divergence, laplacian.
 479        ```
 480
 481        Examples:
 482            - [volume_operations.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/volume_operations.py)
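
            A short sketch of the unary and binary forms (inputs are illustrative):
            ```python
            from vedo import Volume, dataurl

            vol1 = Volume(dataurl + "embryo.slc")
            vol2 = vol1.clone()
            grad = vol1.operation("gradient")  # filters return a new Volume
            vol1.operation("+", vol2)          # arithmetic ops modify vol1 in place
            vol1.operation("*", 2.0)           # multiply by a constant
            ```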
 483        """
 484        op = operation.lower()
 485        image1 = self._data
 486
 487        mf = None
 488        if op in ["median"]:
 489            mf = vtk.vtkImageMedian3D()
 490            mf.SetInputData(image1)
 491        elif op in ["mag"]:
 492            mf = vtk.vtkImageMagnitude()
 493            mf.SetInputData(image1)
 494        elif op in ["dot", "dotproduct"]:
 495            mf = vtk.vtkImageDotProduct()
 496            mf.SetInput1Data(image1)
 497            mf.SetInput2Data(volume2.imagedata())
 498        elif op in ["grad", "gradient"]:
 499            mf = vtk.vtkImageGradient()
 500            mf.SetDimensionality(3)
 501            mf.SetInputData(image1)
 502        elif op in ["div", "divergence"]:
 503            mf = vtk.vtkImageDivergence()
 504            mf.SetInputData(image1)
 505        elif op in ["laplacian"]:
 506            mf = vtk.vtkImageLaplacian()
 507            mf.SetDimensionality(3)
 508            mf.SetInputData(image1)
 509        
 510        if mf is not None:
 511            mf.Update()
 512            vol = Volume(mf.GetOutput())
 513            vol.pipeline = utils.OperationNode(
 514                f"operation\n{op}", parents=[self], c="#4cc9f0",
 515                shape="cylinder",
 516            )
 517            return vol ###########################
 518
 519        mat = vtk.vtkImageMathematics()
 520        mat.SetInput1Data(image1)
 521
 522        K = None
 523
 524        if utils.is_number(volume2):
 525            K = volume2
 526            mat.SetConstantK(K)
 527            mat.SetConstantC(K)
 528
 529        elif volume2 is not None:  # assume volume2 is a Volume object
 530            mat.SetInput2Data(volume2.imagedata())
 531
 532        # ###########################
 533        if op in ["+", "add", "plus"]:
 534            if K:
 535                mat.SetOperationToAddConstant()
 536            else:
 537                mat.SetOperationToAdd()
 538
 539        elif op in ["-", "subtract", "minus"]:
 540            if K:
 541                mat.SetConstantC(-float(K))
 542                mat.SetOperationToAddConstant()
 543            else:
 544                mat.SetOperationToSubtract()
 545
 546        elif op in ["*", "multiply", "times"]:
 547            if K:
 548                mat.SetOperationToMultiplyByK()
 549            else:
 550                mat.SetOperationToMultiply()
 551
 552        elif op in ["/", "divide"]:
 553            if K:
 554                mat.SetConstantK(1.0 / K)
 555                mat.SetOperationToMultiplyByK()
 556            else:
 557                mat.SetOperationToDivide()
 558
 559        elif op in ["1/x", "invert"]:
 560            mat.SetOperationToInvert()
 561        elif op in ["sin"]:
 562            mat.SetOperationToSin()
 563        elif op in ["cos"]:
 564            mat.SetOperationToCos()
 565        elif op in ["exp"]:
 566            mat.SetOperationToExp()
 567        elif op in ["log"]:
 568            mat.SetOperationToLog()
 569        elif op in ["abs"]:
 570            mat.SetOperationToAbsoluteValue()
 571        elif op in ["**2", "square"]:
 572            mat.SetOperationToSquare()
 573        elif op in ["sqrt", "sqr"]:
 574            mat.SetOperationToSquareRoot()
 575        elif op in ["min"]:
 576            mat.SetOperationToMin()
 577        elif op in ["max"]:
 578            mat.SetOperationToMax()
 579        elif op in ["atan"]:
 580            mat.SetOperationToATAN()
 581        elif op in ["atan2"]:
 582            mat.SetOperationToATAN2()
 583        else:
 584            vedo.logger.error(f"unknown operation {operation}")
 585            raise RuntimeError()
 586        mat.Update()
 587
 588        self._update(mat.GetOutput())
 589
 590        self.pipeline = utils.OperationNode(
 591            f"operation\n{op}", parents=[self, volume2],
 592            shape="cylinder", c="#4cc9f0",
 593        )
 594        return self
 595
 596    def frequency_pass_filter(self, low_cutoff=None, high_cutoff=None, order=1):
 597        """
 598        Low-pass and high-pass filtering become trivial in the frequency domain.
 599        A portion of the pixels/voxels are simply masked or attenuated.
 600        This function applies Butterworth low-pass and/or high-pass filters that
 601        attenuate the frequency domain image.
 602
 603        The gradual attenuation of the filter is important.
 604        A simple high-pass filter would simply mask a set of pixels in the frequency domain,
 605        but the abrupt transition would cause a ringing effect in the spatial domain.
 606
 607        Arguments:
 608            low_cutoff : (list)
 609                the cutoff frequencies for x, y and z
 610            high_cutoff : (list)
 611                the cutoff frequencies for x, y and z
 612            order : (int)
 613                order determines sharpness of the cutoff curve
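
        Example:
            A minimal low-pass sketch (the cutoff values are illustrative):
            ```python
            from vedo import Volume, dataurl

            vol = Volume(dataurl + "embryo.slc")
            # attenuate frequencies above the given cutoffs along x, y and z
            vol.frequency_pass_filter(high_cutoff=(0.1, 0.1, 0.1), order=2)
            ```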
 614        """
 615        # https://lorensen.github.io/VTKExamples/site/Cxx/ImageProcessing/IdealHighPass
 616        fft = vtk.vtkImageFFT()
 617        fft.SetInputData(self._data)
 618        fft.Update()
 619        out = fft.GetOutput()
 620
 621        if high_cutoff:
 622            blp = vtk.vtkImageButterworthLowPass()
 623            blp.SetInputData(out)
 624            blp.SetCutOff(high_cutoff)
 625            blp.SetOrder(order)
 626            blp.Update()
 627            out = blp.GetOutput()
 628
 629        if low_cutoff:
 630            bhp = vtk.vtkImageButterworthHighPass()
 631            bhp.SetInputData(out)
 632            bhp.SetCutOff(low_cutoff)
 633            bhp.SetOrder(order)
 634            bhp.Update()
 635            out = bhp.GetOutput()
 636
 637        rfft = vtk.vtkImageRFFT()
 638        rfft.SetInputData(out)
 639        rfft.Update()
 640
 641        ecomp = vtk.vtkImageExtractComponents()
 642        ecomp.SetInputData(rfft.GetOutput())
 643        ecomp.SetComponents(0)
 644        ecomp.Update()
 645        self._update(ecomp.GetOutput())
 646        self.pipeline = utils.OperationNode("frequency_pass_filter", parents=[self], c="#4cc9f0")
 647        return self
 648
 649    def smooth_gaussian(self, sigma=(2, 2, 2), radius=None):
 650        """
 651        Performs a convolution of the input Volume with a Gaussian kernel.
 652
 653        Arguments:
 654            sigma : (float, list)
 655                standard deviation(s) in voxel units.
 656                A list can be given to smooth the three directions differently.
 657            radius : (float, list)
 658                radius factor(s) determine how far out the Gaussian
 659                kernel will go before being clamped to zero. A list can be given too.
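
        Example:
            A short sketch (the sigma values are illustrative):
            ```python
            from vedo import Volume, dataurl

            vol = Volume(dataurl + "embryo.slc")
            vol.smooth_gaussian(sigma=(1, 1, 2))  # smooth more along z
            ```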
 660        """
 661        gsf = vtk.vtkImageGaussianSmooth()
 662        gsf.SetDimensionality(3)
 663        gsf.SetInputData(self.imagedata())
 664        if utils.is_sequence(sigma):
 665            gsf.SetStandardDeviations(sigma)
 666        else:
 667            gsf.SetStandardDeviation(sigma)
 668        if radius is not None:
 669            if utils.is_sequence(radius):
 670                gsf.SetRadiusFactors(radius)
 671            else:
 672                gsf.SetRadiusFactor(radius)
 673        gsf.Update()
 674        self._update(gsf.GetOutput())
 675        self.pipeline = utils.OperationNode("smooth_gaussian", parents=[self], c="#4cc9f0")
 676        return self
 677
 678    def smooth_median(self, neighbours=(2, 2, 2)):
 679        """
 680        Median filter that replaces each voxel with the median value
 681        from a rectangular neighborhood around that voxel.
 682        """
 683        imgm = vtk.vtkImageMedian3D()
 684        imgm.SetInputData(self.imagedata())
 685        if utils.is_sequence(neighbours):
 686            imgm.SetKernelSize(neighbours[0], neighbours[1], neighbours[2])
 687        else:
 688            imgm.SetKernelSize(neighbours, neighbours, neighbours)
 689        imgm.Update()
 690        self._update(imgm.GetOutput())
 691        self.pipeline = utils.OperationNode("smooth_median", parents=[self], c="#4cc9f0")
 692        return self
 693
 694    def erode(self, neighbours=(2, 2, 2)):
 695        """
 696        Replace a voxel with the minimum over an ellipsoidal neighborhood of voxels.
 697        If `neighbours` of an axis is 1, no processing is done on that axis.
 698
 699        Examples:
 700            - [erode_dilate.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/erode_dilate.py)
 701        
 702                ![](https://vedo.embl.es/images/volumetric/erode_dilate.png)
 703        """
 704        ver = vtk.vtkImageContinuousErode3D()
 705        ver.SetInputData(self._data)
 706        ver.SetKernelSize(neighbours[0], neighbours[1], neighbours[2])
 707        ver.Update()
 708        self._update(ver.GetOutput())
 709        self.pipeline = utils.OperationNode("erode", parents=[self], c="#4cc9f0")
 710        return self
 711
 712    def dilate(self, neighbours=(2, 2, 2)):
 713        """
 714        Replace a voxel with the maximum over an ellipsoidal neighborhood of voxels.
 715        If `neighbours` of an axis is 1, no processing is done on that axis.
 716
 717        Examples:
 718            - [erode_dilate.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/erode_dilate.py)
 719        """
 720        ver = vtk.vtkImageContinuousDilate3D()
 721        ver.SetInputData(self._data)
 722        ver.SetKernelSize(neighbours[0], neighbours[1], neighbours[2])
 723        ver.Update()
 724        self._update(ver.GetOutput())
 725        self.pipeline = utils.OperationNode("dilate", parents=[self], c="#4cc9f0")
 726        return self
 727
 728    def magnitude(self):
 729        """Colapses components with magnitude function."""
 730        imgm = vtk.vtkImageMagnitude()
 731        imgm.SetInputData(self.imagedata())
 732        imgm.Update()
 733        self._update(imgm.GetOutput())
 734        self.pipeline = utils.OperationNode("magnitude", parents=[self], c="#4cc9f0")
 735        return self
 736
 737    def topoints(self):
 738        """
 739        Extract all image voxels as points.
 740        This function takes an input `Volume` and creates a `Points` object
 741        that contains the points and the point attributes.
 742
 743        Examples:
 744            - [vol2points.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/vol2points.py)
 745        """
 746        v2p = vtk.vtkImageToPoints()
 747        v2p.SetInputData(self.imagedata())
 748        v2p.Update()
 749        mpts = vedo.Points(v2p.GetOutput())
 750        mpts.pipeline = utils.OperationNode("topoints", parents=[self], c="#4cc9f0:#e9c46a")
 751        return mpts
 752
 753    def euclidean_distance(self, anisotropy=False, max_distance=None):
 754        """
 755        Implementation of the Euclidean DT (Distance Transform) using Saito's algorithm.
 756        The distance map produced contains the square of the Euclidean distance values.
 757        The algorithm has an O(n^(D+1)) complexity over n x n x...x n images in D dimensions.
 758
 759        Check out also: https://en.wikipedia.org/wiki/Distance_transform
 760
 761        Arguments:
 762            anisotropy : (bool)
 763                if True, the voxel spacing is taken into account
 764                in the computation of the distances.
 765            max_distance : (float)
 766                any distance bigger than max_distance will not be
 767                computed but set to this specified value instead.
 768
 769        Examples:
 770            - [euclidian_dist.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/euclidian_dist.py)
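
            A minimal sketch (the input dataset is illustrative):
            ```python
            from vedo import Volume, dataurl

            vol = Volume(dataurl + "embryo.slc")
            dt = vol.euclidean_distance()  # a new Volume holding squared distances
            ```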
 771        """
 772        euv = vtk.vtkImageEuclideanDistance()
 773        euv.SetInputData(self._data)
 774        euv.SetConsiderAnisotropy(anisotropy)
 775        if max_distance is not None:
 776            euv.InitializeOn()
 777            euv.SetMaximumDistance(max_distance)
 778        euv.SetAlgorithmToSaito()
 779        euv.Update()
 780        vol = Volume(euv.GetOutput())
 781        vol.pipeline = utils.OperationNode("euclidean_distance", parents=[self], c="#4cc9f0")
 782        return vol
 783
 784    def correlation_with(self, vol2, dim=2):
 785        """
 786        Find the correlation between two volumetric data sets.
 787        Keyword `dim` determines whether the correlation will be 3D, 2D or 1D.
 788        The default is a 2D correlation.
 789
 790        The output size will match the size of the first input.
 791        The second input is considered the correlation kernel.
 792        """
 793        imc = vtk.vtkImageCorrelation()
 794        imc.SetInput1Data(self._data)
 795        imc.SetInput2Data(vol2.imagedata())
 796        imc.SetDimensionality(dim)
 797        imc.Update()
 798        vol = Volume(imc.GetOutput())
 799
 800        vol.pipeline = utils.OperationNode(
 801            "correlation_with", parents=[self, vol2], c="#4cc9f0",
 802        )
 803        return vol
 804
 805    def scale_voxels(self, scale=1):
 806        """Scale the voxel content by factor `scale`."""
 807        rsl = vtk.vtkImageReslice()
 808        rsl.SetInputData(self.imagedata())
 809        rsl.SetScalarScale(scale)
 810        rsl.Update()
 811        self._update(rsl.GetOutput())
 812        self.pipeline = utils.OperationNode(
 813            f"scale_voxels\nscale={scale}", parents=[self], c="#4cc9f0")
 814        return self
 815
 816
 817##########################################################################
 818class Volume(BaseVolume, BaseGrid, vtk.vtkVolume):
 819    """
 820    Class to describe datasets that are defined on "voxels",
 821    the 3D equivalent of 2D pixels.
 822    """
 823    def __init__(
 824        self,
 825        inputobj=None,
 826        c="RdBu_r",
 827        alpha=(0.0, 0.0, 0.2, 0.4, 0.8, 1.0),
 828        alpha_gradient=None,
 829        alpha_unit=1,
 830        mode=0,
 831        spacing=None,
 832        dims=None,
 833        origin=None,
 834        mapper="smart",
 835    ):
 836        """
 837        This class can be initialized with a numpy array, a `vtkImageData` object,
 838        or a list of 2D BMP files.
 839
 840        Arguments:
 841            c : (list, str)
 842                sets colors along the scalar range, or a matplotlib color map name
 843            alpha : (float, list)
 844                sets transparencies along the scalar range
 845            alpha_unit : (float)
 846                low values make composite rendering look brighter and denser
 847            origin : (list)
 848                set volume origin coordinates
 849            spacing : (list)
 850                voxel dimensions in x, y and z.
 851            dims : (list)
 852                specify the dimensions of the volume.
 853            mapper : (str)
 854                either 'gpu', 'opengl_gpu', 'fixed' or 'smart'
 855            mode : (int)
 856                define the volumetric rendering style:
 857                    - 0, composite rendering
 858                    - 1, maximum projection
 859                    - 2, minimum projection
 860                    - 3, average projection
 861                    - 4, additive mode
 862
 863                <br>The default mode is "composite" where the scalar values are sampled through
 864                the volume and composited in a front-to-back scheme through alpha blending.
 865                The final color and opacity are determined using the color and opacity transfer
 866                functions specified in the alpha keyword.
 867
 868                Maximum and minimum intensity blend modes use the maximum and minimum
 869                scalar values, respectively, along the sampling ray.
 870                The final color and opacity are determined by passing the resultant value
 871                through the color and opacity transfer functions.
 872
 873                Additive blend mode accumulates scalar values by passing each value
 874                through the opacity transfer function and then adding up the product
 875                of the value and its opacity. In other words, the scalar values are scaled
 876                using the opacity transfer function and summed to derive the final color.
 877                Note that the resulting image is always grayscale i.e. aggregated values
 878                are not passed through the color transfer function.
 879                This is because the final value is a derived value and not a real data value
 880                along the sampling ray.
 881
 882                Average intensity blend mode works similarly to the additive blend mode, where
 883                the scalar values are multiplied by opacity calculated from the opacity
 884                transfer function and then added.
 885                The additional step here is to divide the sum by the number of samples
 886                taken through the volume.
 887                As is the case with the additive intensity projection, the final image will
 888                always be grayscale i.e. the aggregated values are not passed through the
 889                color transfer function.
 890
 891        Example:
 892            ```python
 893            from vedo import Volume
 894            vol = Volume("path/to/mydata/rec*.bmp")
 895            vol.show()
 896            ```
 897
 898        Examples:
 899            - [numpy2volume1.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/numpy2volume1.py)
 900
 901                ![](https://vedo.embl.es/images/volumetric/numpy2volume1.png)
 902
 903            - [read_volume2.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/read_volume2.py)
 904
 905                ![](https://vedo.embl.es/images/volumetric/read_volume2.png)
 906
 907        .. note:: 
 908            if a `list` of values is used for `alpha`, this is interpreted
 909            as a transfer function along the range of the scalar values.
 910        """
 911        vtk.vtkVolume.__init__(self)
 912        BaseGrid.__init__(self)
 913        BaseVolume.__init__(self)
 914        # super().__init__()
 915        
 916        ###################
 917        if isinstance(inputobj, str):
 918
 919            if "https://" in inputobj:
 920                inputobj = vedo.io.download(inputobj, verbose=False)  # fpath
 921            elif os.path.isfile(inputobj):
 922                pass
 923            else:
 924                inputobj = sorted(glob.glob(inputobj))
 925
 926        ###################
 927        if "gpu" in mapper:
 928            self._mapper = vtk.vtkGPUVolumeRayCastMapper()
 929        elif "opengl_gpu" in mapper:
 930            self._mapper = vtk.vtkOpenGLGPUVolumeRayCastMapper()
 931        elif "smart" in mapper:
 932            self._mapper = vtk.vtkSmartVolumeMapper()
 933        elif "fixed" in mapper:
 934            self._mapper = vtk.vtkFixedPointVolumeRayCastMapper()
 935        elif isinstance(mapper, vtk.vtkMapper):
 936            self._mapper = mapper
 937        else:
 938            print("Error unknown mapper type", [mapper])
 939            raise RuntimeError()
 940        self.SetMapper(self._mapper)
 941
 942        ###################
 943        inputtype = str(type(inputobj))
 944
 945        # print('Volume inputtype', inputtype, c='b')
 946
 947        if inputobj is None:
 948            img = vtk.vtkImageData()
 949
 950        elif utils.is_sequence(inputobj):
 951
 952            if isinstance(inputobj[0], str) and ".bmp" in inputobj[0].lower():
 953                # scan sequence of BMP files
 954                ima = vtk.vtkImageAppend()
 955                ima.SetAppendAxis(2)
 956                pb = utils.ProgressBar(0, len(inputobj))
 957                for i in pb.range():
 958                    f = inputobj[i]
 959                    if "_rec_spr.bmp" in f:
 960                        continue
 961                    picr = vtk.vtkBMPReader()
 962                    picr.SetFileName(f)
 963                    picr.Update()
 964                    mgf = vtk.vtkImageMagnitude()
 965                    mgf.SetInputData(picr.GetOutput())
 966                    mgf.Update()
 967                    ima.AddInputData(mgf.GetOutput())
 968                    pb.print("loading...")
 969                ima.Update()
 970                img = ima.GetOutput()
 971
 972            else:
 973
 974                if len(inputobj.shape) == 1:
 975                    varr = utils.numpy2vtk(inputobj)
 976                else:
 977                    varr = utils.numpy2vtk(inputobj.ravel(order="F"))
 978                varr.SetName("input_scalars")
 979
 980                img = vtk.vtkImageData()
 981                if dims is not None:
 982                    img.SetDimensions(dims[2],dims[1],dims[0])
 983                else:
 984                    if len(inputobj.shape)==1:
 985                        vedo.logger.error("must set dimensions (dims keyword) in Volume")
 986                        raise RuntimeError()
 987                    img.SetDimensions(inputobj.shape)
 988                img.GetPointData().AddArray(varr)
 989                img.GetPointData().SetActiveScalars(varr.GetName())
 990
 991        elif "ImageData" in inputtype:
 992            img = inputobj
 993
 994        elif isinstance(inputobj, Volume):
 995            img = inputobj.inputdata()
 996
 997        elif "UniformGrid" in inputtype:
 998            img = inputobj
 999
1000        elif hasattr(inputobj, "GetOutput"):  # passing a vtk object, try to extract the imagedata
1001            if hasattr(inputobj, "Update"):
1002                inputobj.Update()
1003            img = inputobj.GetOutput()
1004
1005        elif isinstance(inputobj, str):
1006            if "https://" in inputobj:
1007                inputobj = vedo.io.download(inputobj, verbose=False)
1008            img = vedo.io.loadImageData(inputobj)
1009
1010        else:
1011            vedo.logger.error(f"cannot understand input type {inputtype}")
1012            return
1013
1014        if dims is not None:
1015            img.SetDimensions(dims)
1016
1017        if origin is not None:
1018            img.SetOrigin(origin)  ### DIFFERENT from volume.origin()!
1019
1020        if spacing is not None:
1021            img.SetSpacing(spacing)
1022
1023        self._data = img
1024        self._mapper.SetInputData(img)
1025
1026        if img.GetPointData().GetScalars():
1027            if img.GetPointData().GetScalars().GetNumberOfComponents() == 1:
1028                self.mode(mode).color(c).alpha(alpha).alpha_gradient(alpha_gradient)
1029                self.GetProperty().SetShade(True)
1030                self.GetProperty().SetInterpolationType(1)
1031                self.GetProperty().SetScalarOpacityUnitDistance(alpha_unit)
1032
1033        # remember stuff:
1034        self._mode = mode
1035        self._color = c
1036        self._alpha = alpha
1037        self._alpha_grad = alpha_gradient
1038        self._alpha_unit = alpha_unit
1039        
1040        self.pipeline = utils.OperationNode("Volume",
1041            comment=f"dims={tuple(self.dimensions())}", c="#4cc9f0",
1042        )
1043        #######################################################################
1044
1045
1046    def _update(self, data):
1047        self._data = data
1048        self._data.GetPointData().Modified()
1049        self._mapper.SetInputData(data)
1050        self._mapper.Modified()
1051        self._mapper.Update()
1052        return self
1053
1054    def mode(self, mode=None):
1055        """
1056        Define the volumetric rendering mode from the following options:
1057            - 0, composite rendering
1058            - 1, maximum projection rendering
1059            - 2, minimum projection rendering
1060            - 3, average projection rendering
1061            - 4, additive mode
1062
1063        The default mode is "composite" where the scalar values are sampled through
1064        the volume and composited in a front-to-back scheme through alpha blending.
1065        The final color and opacity are determined using the color and opacity transfer
1066        functions specified in the alpha keyword.
1067
1068        Maximum and minimum intensity blend modes use the maximum and minimum
1069        scalar values, respectively, along the sampling ray.
1070        The final color and opacity are determined by passing the resultant value
1071        through the color and opacity transfer functions.
1072
1073        Additive blend mode accumulates scalar values by passing each value
1074        through the opacity transfer function and then adding up the product
1075        of the value and its opacity. In other words, the scalar values are scaled
1076        using the opacity transfer function and summed to derive the final color.
1077        Note that the resulting image is always grayscale i.e. aggregated values
1078        are not passed through the color transfer function.
1079        This is because the final value is a derived value and not a real data value
1080        along the sampling ray.
1081
1082        Average intensity blend mode works similarly to the additive blend mode, where
1083        the scalar values are multiplied by opacity calculated from the opacity
1084        transfer function and then added.
1085        The additional step here is to divide the sum by the number of samples
1086        taken through the volume.
1087        As is the case with the additive intensity projection, the final image will
1088        always be grayscale i.e. the aggregated values are not passed through the
1089        color transfer function.
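
        Example:
            A short usage sketch (the dataset is illustrative):
            ```python
            from vedo import Volume, dataurl

            vol = Volume(dataurl + "embryo.slc")
            vol.mode(1)                 # maximum intensity projection
            vol.mode("max projection")  # equivalent string form
            ```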
1090        """
1091        if mode is None:
1092            return self._mapper.GetBlendMode()
1093
1094        if isinstance(mode, str):
1095            if "comp" in mode:
1096                mode = 0
1097            elif "proj" in mode:
1098                if "max" in mode:
1099                    mode = 1
1100                elif "min" in mode:
1101                    mode = 2
1102                elif "ave" in mode:
1103                    mode = 3
1104                else:
1105                    vedo.logger.warning(f"unknown mode {mode}")
1106                    mode = 0
1107            elif "add" in mode:
1108                mode = 4
1109            else:
1110                vedo.logger.warning(f"unknown mode {mode}")
1111                mode = 0
1112
1113        self._mapper.SetBlendMode(mode)
1114        self._mode = mode
1115        return self
1116
1117    def shade(self, status=None):
1118        """
1119        Set/Get the shading of a Volume.
1120        Shading can be further controlled with `volume.lighting()` method.
1121
1122        If shading is turned on, the mapper may perform shading calculations.
1123        In some cases shading does not apply
1124        (for example, in maximum intensity projection mode).
1125        """
1126        if status is None:
1127            return self.GetProperty().GetShade()
1128        self.GetProperty().SetShade(status)
1129        return self
1130
1131    def cmap(self, c, alpha=None, vmin=None, vmax=None):
1132        """Same as `color()`.
1133
1134        Arguments:
1135            alpha : (list)
1136                use a list to specify transparencies along the scalar range
1137            vmin : (float)
1138                force the min of the scalar range to be this value
1139            vmax : (float)
1140                force the max of the scalar range to be this value
1141        """
1142        return self.color(c, alpha, vmin, vmax)
1143
1144    def jittering(self, status=None):
1145        """
1146        If `True`, each ray traversal direction will be perturbed slightly
1147        using a noise-texture to get rid of wood-grain effects.
1148        """
1149        if hasattr(self._mapper, "SetUseJittering"):  # tetmesh doesnt have it
1150            if status is None:
1151                return self._mapper.GetUseJittering()
1152            self._mapper.SetUseJittering(status)
1153        return self
1154
1155    def mask(self, data):
1156        """
1157        Mask a volume visualization with a binary mask.
1158        This requires the volume to be created with the keyword `mapper='gpu'`.
1159
1160        Example:
1161        ```python
1162            from vedo import np, Volume, show
1163            data_matrix = np.zeros([75, 75, 75], dtype=np.uint8)
1164            # all voxels have value zero except:
1165            data_matrix[0:35,   0:35,  0:35] = 1
1166            data_matrix[35:55, 35:55, 35:55] = 2
1167            data_matrix[55:74, 55:74, 55:74] = 3
1168            vol = Volume(data_matrix, c=['white','b','g','r'], mapper='gpu')
1169            data_mask = np.zeros_like(data_matrix)
1170            data_mask[10:65, 10:45, 20:75] = 1
1171            vol.mask(data_mask)
1172            show(vol, axes=1).close()
1173        ```
1174        """
1175        mask = Volume(data.astype(np.uint8))
1176        try:
1177            self.mapper().SetMaskTypeToBinary()
1178            self.mapper().SetMaskInput(mask.inputdata())
1179        except AttributeError:
1180            vedo.logger.error("volume.mask() requires the volume to be created with Volume(..., mapper='gpu')")
1181        return self
1182
1183    def alpha_gradient(self, alpha_grad, vmin=None, vmax=None):
1184        """
1185        Assign a set of transparencies to a volume's gradient
1186        along the range of the scalar value.
1187        A single constant value can also be assigned.
1188        The gradient function is used to decrease the opacity
1189        in the "flat" regions of the volume while maintaining the opacity
1190        at the boundaries between material types.  The gradient is measured
1191        as the amount by which the intensity changes over unit distance.
1192
1193        The format for alpha_grad is the same as for method `volume.alpha()`.
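
        Example:
            A minimal sketch (the opacity values are illustrative):
            ```python
            from vedo import Volume, dataurl

            vol = Volume(dataurl + "embryo.slc")
            # flat regions become transparent, strong gradients stay opaque
            vol.alpha_gradient([0.0, 0.5, 1.0])
            ```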
1194        """
1195        if vmin is None:
1196            vmin, _ = self._data.GetScalarRange()
1197        if vmax is None:
1198            _, vmax = self._data.GetScalarRange()
1199        self._alpha_grad = alpha_grad
1200        volumeProperty = self.GetProperty()
1201
1202        if alpha_grad is None:
1203            volumeProperty.DisableGradientOpacityOn()
1204            return self
1205
1206        volumeProperty.DisableGradientOpacityOff()
1207
1208        gotf = volumeProperty.GetGradientOpacity()
1209        if utils.is_sequence(alpha_grad):
1210            alpha_grad = np.array(alpha_grad)
1211            if len(alpha_grad.shape)==1: # user passing a flat list e.g. (0.0, 0.3, 0.9, 1)
1212                for i, al in enumerate(alpha_grad):
1213                    xalpha = vmin + (vmax - vmin) * i / (len(alpha_grad) - 1)
1214                    # Create transfer mapping scalar value to gradient opacity
1215                    gotf.AddPoint(xalpha, al)
1216            elif len(alpha_grad.shape) == 2:  # user passing [(x0,alpha0), ...]
1217                gotf.AddPoint(vmin, alpha_grad[0][1])
1218                for xalpha, al in alpha_grad:
1219                    # Create transfer mapping scalar value to opacity
1220                    gotf.AddPoint(xalpha, al)
1221                gotf.AddPoint(vmax, alpha_grad[-1][1])
1222            # print("alpha_grad at", round(xalpha, 1), "\tset to", al)
1223        else:
1224            gotf.AddPoint(vmin, alpha_grad)  # constant alpha_grad
1225            gotf.AddPoint(vmax, alpha_grad)
1226        return self
1227
1228    def component_weight(self, i, weight):
1229        """Set the scalar component weight in range [0,1]."""
1230        self.GetProperty().SetComponentWeight(i, weight)
1231        return self
1232
1233    def xslice(self, i):
1234        """Extract the slice at index `i` of volume along x-axis."""
1235        vslice = vtk.vtkImageDataGeometryFilter()
1236        vslice.SetInputData(self.imagedata())
1237        nx, ny, nz = self.imagedata().GetDimensions()
1238        if i > nx - 1:
1239            i = nx - 1
1240        vslice.SetExtent(i, i, 0, ny, 0, nz)
1241        vslice.Update()
1242        m = Mesh(vslice.GetOutput())
1243        m.pipeline = utils.OperationNode(f"xslice {i}", parents=[self], c="#4cc9f0:#e9c46a")
1244        return m
1245
1246    def yslice(self, j):
1247        """Extract the slice at index `j` of volume along y-axis."""
1248        vslice = vtk.vtkImageDataGeometryFilter()
1249        vslice.SetInputData(self.imagedata())
1250        nx, ny, nz = self.imagedata().GetDimensions()
1251        if j > ny - 1:
1252            j = ny - 1
1253        vslice.SetExtent(0, nx, j, j, 0, nz)
1254        vslice.Update()
1255        m = Mesh(vslice.GetOutput())
1256        m.pipeline = utils.OperationNode(f"yslice {j}", parents=[self], c="#4cc9f0:#e9c46a")
1257        return m
1258
1259    def zslice(self, k):
1260        """Extract the slice at index `i` of volume along z-axis."""
1261        vslice = vtk.vtkImageDataGeometryFilter()
1262        vslice.SetInputData(self.imagedata())
1263        nx, ny, nz = self.imagedata().GetDimensions()
1264        if k > nz - 1:
1265            k = nz - 1
1266        vslice.SetExtent(0, nx, 0, ny, k, k)
1267        vslice.Update()
1268        m = Mesh(vslice.GetOutput())
1269        m.pipeline = utils.OperationNode(f"zslice {k}", parents=[self], c="#4cc9f0:#e9c46a")
1270        return m
1271
1272    @deprecated(reason=vedo.colors.red + "Please use slice_plane()" + vedo.colors.reset)
1273    def slicePlane(self, *a, **b):
1274        "Deprecated. Please use `scalar_range()`"
1275        return self.slice_plane(*a, **b)
1276
1277    def slice_plane(self, origin=(0, 0, 0), normal=(1, 1, 1), autocrop=False):
1278        """
1279        Extract the slice along a given plane position and normal.
1280
1281        Example:
1282            - [slicePlane1.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/slicePlane1.py)
1283
1284                ![](https://vedo.embl.es/images/volumetric/slicePlane1.gif)
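
            A minimal sketch (origin and normal are illustrative):
            ```python
            from vedo import Volume, dataurl, show

            vol = Volume(dataurl + "embryo.slc")
            msh = vol.slice_plane(origin=vol.center(), normal=(0, 1, 1))
            msh.cmap("Purples_r")
            show(vol, msh, axes=1).close()
            ```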
1285        """
1286        reslice = vtk.vtkImageReslice()
1287        reslice.SetInputData(self._data)
1288        reslice.SetOutputDimensionality(2)
1289        newaxis = utils.versor(normal)
1290        pos = np.array(origin)
1291        initaxis = (0, 0, 1)
1292        crossvec = np.cross(initaxis, newaxis)
1293        angle = np.arccos(np.dot(initaxis, newaxis))
1294        T = vtk.vtkTransform()
1295        T.PostMultiply()
1296        T.RotateWXYZ(np.rad2deg(angle), crossvec)
1297        T.Translate(pos)
1298        M = T.GetMatrix()
1299        reslice.SetResliceAxes(M)
1300        reslice.SetInterpolationModeToLinear()
1301        reslice.SetAutoCropOutput(not autocrop)
1302        reslice.Update()
1303        vslice = vtk.vtkImageDataGeometryFilter()
1304        vslice.SetInputData(reslice.GetOutput())
1305        vslice.Update()
1306        msh = Mesh(vslice.GetOutput())
1307        msh.SetOrientation(T.GetOrientation())
1308        msh.SetPosition(pos)
1309        msh.pipeline = utils.OperationNode(
1310            "slice_plane", parents=[self], c="#4cc9f0:#e9c46a")
1311        return msh
1312
1313    def warp(self, source, target, sigma=1, mode="3d", fit=False):
1314        """
1315        Warp volume scalars within a Volume by specifying
1316        source and target sets of points.
1317
1318        Arguments:
1319            source : (Points, list)
1320                the list of source points
1321            target : (Points, list)
1322                the list of target points
1323            fit : (bool)
1324                fit/adapt the old bounding box to the warped geometry
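
        Example:
            A minimal sketch (the landmark coordinates are illustrative):
            ```python
            from vedo import Volume, dataurl

            vol = Volume(dataurl + "embryo.slc")
            sources = [[100, 100, 100], [150, 150, 150]]
            targets = [[110,  90, 100], [150, 150, 150]]
            vol.warp(sources, targets, sigma=2)
            ```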
1325        """
1326        if isinstance(source, vedo.Points):
1327            source = source.points()
1328        if isinstance(target, vedo.Points):
1329            target = target.points()
1330
1331        ns = len(source)
1332        ptsou = vtk.vtkPoints()
1333        ptsou.SetNumberOfPoints(ns)
1334        for i in range(ns):
1335            ptsou.SetPoint(i, source[i])
1336
1337        nt = len(target)
1338        if ns != nt:
1339            vedo.logger.error(f"#source {ns} != {nt} #target points")
1340            raise RuntimeError()
1341
1342        pttar = vtk.vtkPoints()
1343        pttar.SetNumberOfPoints(nt)
1344        for i in range(ns):
1345            pttar.SetPoint(i, target[i])
1346
1347        T = vtk.vtkThinPlateSplineTransform()
1348        if mode.lower() == "3d":
1349            T.SetBasisToR()
1350        elif mode.lower() == "2d":
1351            T.SetBasisToR2LogR()
1352        else:
1353            vedo.logger.error(f"unknown mode {mode}")
1354            raise RuntimeError()
1355
1356        T.SetSigma(sigma)
1357        T.SetSourceLandmarks(ptsou)
1358        T.SetTargetLandmarks(pttar)
1359        T.Inverse()
1360        self.transform = T
1361        self.apply_transform(T, fit=fit)
1362        self.pipeline = utils.OperationNode("warp", parents=[self], c="#4cc9f0")
1363        return self
1364
1365    def apply_transform(self, T, fit=False):
1366        """
1367        Apply a transform to the scalars in the volume.
1368
1369        Arguments:
1370            T : (vtkTransform, matrix)
1371                The transformation to be applied
1372            fit : (bool)
1373                fit/adapt the old bounding box to the warped geometry
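
        Example:
            A minimal sketch using a plain 4x4 matrix (the values are illustrative):
            ```python
            from vedo import Volume, dataurl

            vol = Volume(dataurl + "embryo.slc")
            M = [[1, 0, 0, 10],  # translation of 10 units along x
                 [0, 1, 0,  0],
                 [0, 0, 1,  0],
                 [0, 0, 0,  1]]
            vol.apply_transform(M)
            ```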
1374        """
1375        if isinstance(T, vtk.vtkMatrix4x4):
1376            tr = vtk.vtkTransform()
1377            tr.SetMatrix(T)
1378            T = tr
1379
1380        elif utils.is_sequence(T):
1381            M = vtk.vtkMatrix4x4()
1382            n = len(T[0])
1383            for i in range(n):
1384                for j in range(n):
1385                    M.SetElement(i, j, T[i][j])
1386            tr = vtk.vtkTransform()
1387            tr.SetMatrix(M)
1388            T = tr
1389
1390        reslice = vtk.vtkImageReslice()
1391        reslice.SetInputData(self._data)
1392        reslice.SetResliceTransform(T)
1393        reslice.SetOutputDimensionality(3)
1394        reslice.SetInterpolationModeToLinear()
1395
1396        spacing = self._data.GetSpacing()
1397        origin = self._data.GetOrigin()
1398
1399        if fit:
1400            bb = self.box()
1401            if isinstance(T, vtk.vtkThinPlateSplineTransform):
1402                TI = vtk.vtkThinPlateSplineTransform()
1403                TI.DeepCopy(T)
1404                TI.Inverse()
1405            else:
1406                TI = vtk.vtkTransform()
1407                TI.DeepCopy(T)
1408            bb.apply_transform(TI)
1409            bounds = bb.GetBounds()
1410            bounds = (
1411                bounds[0]/spacing[0], bounds[1]/spacing[0],
1412                bounds[2]/spacing[1], bounds[3]/spacing[1],
1413                bounds[4]/spacing[2], bounds[5]/spacing[2],
1414            )
1415            bounds = np.round(bounds).astype(int)
1416            reslice.SetOutputExtent(bounds)
1417            reslice.SetOutputSpacing(spacing[0], spacing[1], spacing[2])
1418            reslice.SetOutputOrigin(origin[0], origin[1], origin[2])
1419
1420        reslice.Update()
1421        self._update(reslice.GetOutput())
1422        self.pipeline = utils.OperationNode("apply_transform", parents=[self], c="#4cc9f0")
1423        return self
1424
1425
1426##########################################################################
1427class VolumeSlice(BaseVolume, Base3DProp, vtk.vtkImageSlice):
1428    """
1429    Derived class of `vtkImageSlice`.
1430    """
1431    def __init__(self, inputobj=None):
1432        """
1433        This class is equivalent to `Volume` except for its representation.
1434        The main purpose of this class is to be used in conjunction with `Volume`
1435        for visualization using `mode="image"`.
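        Example:
            A minimal sketch of building a `VolumeSlice` from a `Volume`
            (assumes the `embryo.slc` sample dataset and that `VolumeSlice`
            is re-exported by the top-level `vedo` package):
            ```python
            from vedo import Volume, VolumeSlice, dataurl, show
            vol = Volume(dataurl + "embryo.slc")
            vslice = VolumeSlice(vol)  # same data, image-style representation
            show(vslice, axes=1).close()
            ```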
1436        """
1437        vtk.vtkImageSlice.__init__(self)
1438        Base3DProp.__init__(self)
1439        BaseVolume.__init__(self)
1440        # super().__init__()
1441
1442        self._mapper = vtk.vtkImageResliceMapper()
1443        self._mapper.SliceFacesCameraOn()
1444        self._mapper.SliceAtFocalPointOn()
1445        self._mapper.SetAutoAdjustImageQuality(False)
1446        self._mapper.BorderOff()
1447
1448        self.lut = None
1449
1450        self.property = vtk.vtkImageProperty()
1451        self.property.SetInterpolationTypeToLinear()
1452        self.SetProperty(self.property)
1453
1454        ###################
1455        if isinstance(inputobj, str):
1456            if "https://" in inputobj:
1457                inputobj = vedo.io.download(inputobj, verbose=False)  # fpath
1458            elif os.path.isfile(inputobj):
1459                pass
1460            else:
1461                inputobj = sorted(glob.glob(inputobj))
1462
1463        ###################
1464        inputtype = str(type(inputobj))
1465
1466        if inputobj is None:
1467            img = vtk.vtkImageData()
1468
1469        elif isinstance(inputobj, Volume):
1470            img = inputobj.imagedata()
1471            self.lut = utils.ctf2lut(inputobj)
1472
1473        elif utils.is_sequence(inputobj):
1474
1475            if isinstance(inputobj[0], str):  # scan sequence of BMP files
1476                ima = vtk.vtkImageAppend()
1477                ima.SetAppendAxis(2)
1478                pb = utils.ProgressBar(0, len(inputobj))
1479                for i in pb.range():
1480                    f = inputobj[i]
1481                    picr = vtk.vtkBMPReader()
1482                    picr.SetFileName(f)
1483                    picr.Update()
1484                    mgf = vtk.vtkImageMagnitude()
1485                    mgf.SetInputData(picr.GetOutput())
1486                    mgf.Update()
1487                    ima.AddInputData(mgf.GetOutput())
1488                    pb.print("loading...")
1489                ima.Update()
1490                img = ima.GetOutput()
1491
1492            else:
1493                if "ndarray" not in inputtype:
1494                    inputobj = np.array(inputobj)
1495
1496                if len(inputobj.shape) == 1:
1497                    varr = utils.numpy2vtk(inputobj, dtype=float)
1498                else:
1499                    if len(inputobj.shape) > 2:
1500                        inputobj = np.transpose(inputobj, axes=[2, 1, 0])
1501                    varr = utils.numpy2vtk(inputobj.ravel(order="F"), dtype=float)
1502                varr.SetName("input_scalars")
1503
1504                img = vtk.vtkImageData()
1505                img.SetDimensions(inputobj.shape)
1506                img.GetPointData().AddArray(varr)
1507                img.GetPointData().SetActiveScalars(varr.GetName())
1508
1509        elif "ImageData" in inputtype:
1510            img = inputobj
1511
1512        elif isinstance(inputobj, Volume):
1513            img = inputobj.inputdata()
1514
1515        elif "UniformGrid" in inputtype:
1516            img = inputobj
1517
1518        elif hasattr(inputobj, "GetOutput"):  # passing a vtk object, try to extract the image data
1519            if hasattr(inputobj, "Update"):
1520                inputobj.Update()
1521            img = inputobj.GetOutput()
1522
1523        elif isinstance(inputobj, str):
1524            if "https://" in inputobj:
1525                inputobj = vedo.io.download(inputobj, verbose=False)
1526            img = vedo.io.loadImageData(inputobj)
1527
1528        else:
1529            vedo.logger.error(f"cannot understand input type {inputtype}")
1530            return
1531
1532        self._data = img
1533        self._mapper.SetInputData(img)
1534        self.SetMapper(self._mapper)
1535
1536    def bounds(self):
1537        """Return the bounding box as [x0,x1, y0,y1, z0,z1]"""
1538        bns = [0, 0, 0, 0, 0, 0]
1539        self.GetBounds(bns)
1540        return bns
1541
1542    def colorize(self, lut=None, fix_scalar_range=False):
1543        """
1544        Assign a LUT (Look Up Table) to colorize the slice, leave it `None`
1545        to reuse an existing Volume color map.
1546        Use "bw" for automatic black and white.
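        Example:
            A sketch of assigning an explicit lookup table; the standalone
            `vtk` import and the chosen hue/scalar ranges are illustrative
            assumptions, and `vslice` stands for an existing `VolumeSlice`:
            ```python
            import vtk
            lut = vtk.vtkLookupTable()
            lut.SetTableRange(0, 255)
            lut.SetHueRange(0.67, 0.0)  # blue to red
            lut.Build()
            vslice.colorize(lut, fix_scalar_range=True)
            ```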
1547        """
1548        if lut is None and self.lut:
1549            self.property.SetLookupTable(self.lut)
1550        elif isinstance(lut, vtk.vtkLookupTable):
1551            self.property.SetLookupTable(lut)
1552        elif lut == "bw":
1553            self.property.SetLookupTable(None)
1554        self.property.SetUseLookupTableScalarRange(fix_scalar_range)
1555        return self
1556
1557    def alpha(self, value):
1558        """Set the opacity of the slice."""
1559        self.property.SetOpacity(value)
1560        return self
1561
1562    def auto_adjust_quality(self, value=True):
1563        """Automatically reduce the rendering quality for greater speed when interacting"""
1564        self._mapper.SetAutoAdjustImageQuality(value)
1565        return self
1566
1567    def slab(self, thickness=0, mode=0, sample_factor=2):
1568        """
1569        Make a thick slice (slab).
1570
1571        Arguments:
1572            thickness : (float)
1573                set the slab thickness, for thick slicing
1574            mode : (int)
1575                The slab type:
1576                    0 = min
1577                    1 = max
1578                    2 = mean
1579                    3 = sum
1580            sample_factor : (float)
1581                Set the number of slab samples to use as a factor of the number of input slices
1582                within the slab thickness. The default value is 2, but 1 will increase speed
1583                with very little loss of quality.
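        Example:
            A minimal sketch (the values are arbitrary and `vslice` stands for
            an existing `VolumeSlice`): render a 20-unit thick slab using the
            maximum value across the slab.
            ```python
            vslice.slab(thickness=20, mode=1)  # mode=1 -> max
            ```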
1584        """
1585        self._mapper.SetSlabThickness(thickness)
1586        self._mapper.SetSlabType(mode)
1587        self._mapper.SetSlabSampleFactor(sample_factor)
1588        return self
1589
1590    def face_camera(self, value=True):
1591        """Toggle whether the slice always faces the camera."""
1592        self._mapper.SetSliceFacesCamera(value)
1593        return self
1594
1595    def jump_to_nearest_slice(self, value=True):
1596        """
1597        This causes the slicing to occur at the closest slice to the focal point,
1598        instead of the default behavior where a new slice is interpolated between
1599        the original slices.
1600        Nothing happens if the plane is oblique to the original slices."""
1601        self._mapper.SetJumpToNearestSlice(value)
1602        return self
1603
1604    def fill_background(self, value=True):
1605        """
1606        Instead of rendering only to the image border, 
1607        render out to the viewport boundary with the background color. 
1608        The background color will be the lowest color on the lookup
1609        table that is being used for the image."""
1610        self._mapper.SetBackground(value)
1611        return self
1612
1613    def lighting(self, window, level, ambient=1.0, diffuse=0.0):
1614        """Assign the color window, the color level, and the ambient/diffuse lighting of the slice."""
1615        self.property.SetColorWindow(window)
1616        self.property.SetColorLevel(level)
1617        self.property.SetAmbient(ambient)
1618        self.property.SetDiffuse(diffuse)
1619        return self
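
The window/level pair used by `lighting()` follows the usual imaging convention:
`window` is the width of the scalar interval that gets mapped onto the color
ramp and `level` is its center. A minimal sketch (the numeric values are
arbitrary and `vslice` stands for an existing `VolumeSlice`):

```python
# scalars in [level - window/2, level + window/2] span the grey ramp
vslice.lighting(window=300, level=150)
```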
Base class. Do not instantiate.

BaseVolume()
40    def __init__(self):
41        """Base class. Do not instantiate."""
42
43        self._data = None
44        self._mapper = None
45        self.transform = None

Base class. Do not instantiate.

def clone(self):
128    def clone(self):
129        """Return a clone copy of the Volume."""
130        newimg = vtk.vtkImageData()
131        newimg.CopyStructure(self._data)
132        newimg.CopyAttributes(self._data)
133
134        newvol = Volume(newimg)
135        prop = vtk.vtkVolumeProperty()
136        prop.DeepCopy(self.GetProperty())
137        newvol.SetProperty(prop)
138        newvol.SetOrigin(self.GetOrigin())
139        newvol.SetScale(self.GetScale())
140        newvol.SetOrientation(self.GetOrientation())
141        newvol.SetPosition(self.GetPosition())
142        newvol.pipeline = utils.OperationNode(
143            "clone", parents=[self], c="#bbd0ff", shape="diamond")
144        return newvol

Return a clone copy of the Volume.

def imagedata(self):
146    def imagedata(self):
147        """Return the underlying `vtkImagaData` object."""
148        return self._data

Return the underlying vtkImagaData object.

def tonumpy(self):
150    def tonumpy(self):
151        """
152        Get read-write access to voxels of a Volume object as a numpy array.
153
154        When you set values in the output image, you don't want numpy to reallocate the array
155        but instead set values in the existing array, so use the [:] operator.
156
157        Example:
158            `arr[:] = arr*2 + 15`
159
160        If the array is modified add a call to:
161        `volume.imagedata().GetPointData().GetScalars().Modified()`
162        when all your modifications are completed.
163        """
164        narray_shape = tuple(reversed(self._data.GetDimensions()))
165
166        scals = self._data.GetPointData().GetScalars()
167        comps = scals.GetNumberOfComponents()
168        if comps == 1:
169            narray = utils.vtk2numpy(scals).reshape(narray_shape)
170            narray = np.transpose(narray, axes=[2, 1, 0])
171        else:
172            narray = utils.vtk2numpy(scals).reshape(*narray_shape, comps)
173            narray = np.transpose(narray, axes=[2, 1, 0, 3])
174
175        # narray = utils.vtk2numpy(self._data.GetPointData().GetScalars()).reshape(narray_shape)
176        # narray = np.transpose(narray, axes=[2, 1, 0])
177
178        return narray

Get read-write access to voxels of a Volume object as a numpy array.

When you set values in the output image, you don't want numpy to reallocate the array but instead set values in the existing array, so use the [:] operator.

Example:

arr[:] = arr*2 + 15

If the array is modified add a call to: volume.imagedata().GetPointData().GetScalars().Modified() when all your modifications are completed.

def dimensions(self):
180    def dimensions(self):
181        """Return the nr. of voxels in the 3 dimensions."""
182        return np.array(self._data.GetDimensions())

Return the nr. of voxels in the 3 dimensions.

@deprecated(reason=vedo.colors.red + 'Please use scalar_range()' + vedo.colors.reset)
def scalarRange(self):
184    @deprecated(reason=vedo.colors.red + "Please use scalar_range()" + vedo.colors.reset)
185    def scalarRange(self):
186        "Deprecated. Please use `scalar_range()`."
187        return self.scalar_range()

Deprecated. Please use scalar_range().

def scalar_range(self):
189    def scalar_range(self):
190        """Return the range of the scalar values."""
191        return np.array(self._data.GetScalarRange())

Return the range of the scalar values.

def spacing(self, s=None):
193    def spacing(self, s=None):
194        """Set/get the voxels size in the 3 dimensions."""
195        if s is not None:
196            self._data.SetSpacing(s)
197            return self
198        return np.array(self._data.GetSpacing())

Set/get the voxels size in the 3 dimensions.

def origin(self, s=None):
204    def origin(self, s=None):
205        """Set/get the origin of the volumetric dataset."""
206        ### supersedes base.origin()
207        ### DIFFERENT from base.origin()!
208        if s is not None:
209            self._data.SetOrigin(s)
210            return self
211        return np.array(self._data.GetOrigin())

Set/get the origin of the volumetric dataset.

def center(self, p=None):
213    def center(self, p=None):
214        """Set/get the center of the volumetric dataset."""
215        if p is not None:
216            self._data.SetCenter(p)
217            return self
218        return np.array(self._data.GetCenter())

Set/get the center of the volumetric dataset.

def permute_axes(self, x, y, z):
220    def permute_axes(self, x, y, z):
221        """
222        Reorder the axes of the Volume by specifying
223        the input axes which are supposed to become the new X, Y, and Z.
224        """
225        imp = vtk.vtkImagePermute()
226        imp.SetFilteredAxes(x, y, z)
227        imp.SetInputData(self.imagedata())
228        imp.Update()
229        self._update(imp.GetOutput())
230        self.pipeline = utils.OperationNode(
231            f"permute_axes\n{tuple(x,y,z)}", parents=[self], c="#4cc9f0")
232        return self

Reorder the axes of the Volume by specifying the input axes which are supposed to become the new X, Y, and Z.

def resample(self, new_spacing, interpolation=1):
234    def resample(self, new_spacing, interpolation=1):
235        """
236        Resamples a `Volume` to be larger or smaller.
237
238        This method modifies the spacing of the input.
239        Linear interpolation is used to resample the data.
240
241        Arguments:
242            new_spacing : (list)
243                a list of 3 new spacings for the 3 axes
244            interpolation : (int)
245                0=nearest_neighbor, 1=linear, 2=cubic
246        """
247        rsp = vtk.vtkImageResample()
248        oldsp = self.spacing()
249        for i in range(3):
250            if oldsp[i] != new_spacing[i]:
251                rsp.SetAxisOutputSpacing(i, new_spacing[i])
252        rsp.InterpolateOn()
253        rsp.SetInterpolationMode(interpolation)
254        rsp.OptimizationOn()
255        rsp.Update()
256        self._update(rsp.GetOutput())
257        self.pipeline = utils.OperationNode(
258            f"resample\n{tuple(new_spacing)}", parents=[self], c="#4cc9f0")
259        return self

Resamples a Volume to be larger or smaller.

This method modifies the spacing of the input. Linear interpolation is used to resample the data.

Arguments:
  • new_spacing : (list) a list of 3 new spacings for the 3 axes
  • interpolation : (int) 0=nearest_neighbor, 1=linear, 2=cubic
def interpolation(self, itype):
261    def interpolation(self, itype):
262        """
263        Set interpolation type.
264
265        0=nearest neighbour, 1=linear
266        """
267        self.property.SetInterpolationType(itype)
268        return self

Set interpolation type.

0=nearest neighbour, 1=linear

def threshold(self, above=None, below=None, replace=None, replace_value=None):
270    def threshold(self, above=None, below=None, replace=None, replace_value=None):
271        """
272        Binary or continuous volume thresholding.
273        Find the voxels that contain a value above/below the input values
274        and replace them with a new value (default is 0).
275        """
276        th = vtk.vtkImageThreshold()
277        th.SetInputData(self.imagedata())
278
279        # sanity checks
280        if above is not None and below is not None:
281            if above == below:
282                return self
283            if above > below:
284                vedo.logger.warning("in volume.threshold(), above > below, skip.")
285                return self
286
287        ## cases
288        if below is not None and above is not None:
289            th.ThresholdBetween(above, below)
290
291        elif above is not None:
292            th.ThresholdByUpper(above)
293
294        elif below is not None:
295            th.ThresholdByLower(below)
296
297        ##
298        if replace is not None:
299            th.SetReplaceIn(True)
300            th.SetInValue(replace)
301        else:
302            th.SetReplaceIn(False)
303
304        if replace_value is not None:
305            th.SetReplaceOut(True)
306            th.SetOutValue(replace_value)
307        else:
308            th.SetReplaceOut(False)
309
310        th.Update()
311        self._update(th.GetOutput())
312        self.pipeline = utils.OperationNode("threshold", parents=[self], c="#4cc9f0")
313        return self

Binary or continuous volume thresholding. Find the voxels that contain a value above/below the input values and replace them with a new value (default is 0).

def crop( self, left=None, right=None, back=None, front=None, bottom=None, top=None, VOI=()):
315    def crop(
316            self,
317            left=None, right=None,
318            back=None, front=None,
319            bottom=None, top=None,
320            VOI=()
321        ):
322        """
323        Crop a `Volume` object.
324
325        Arguments:
326            left : (float)
327                fraction to crop from the left plane (negative x)
328            right : (float)
329                fraction to crop from the right plane (positive x)
330            back : (float)
331                fraction to crop from the back plane (negative y)
332            front : (float)
333                fraction to crop from the front plane (positive y)
334            bottom : (float)
335                fraction to crop from the bottom plane (negative z)
336            top : (float)
337                fraction to crop from the top plane (positive z)
338            VOI : (list)
339                extract Volume Of Interest expressed in voxel numbers
340
341        Example:
342            `vol.crop(VOI=(xmin, xmax, ymin, ymax, zmin, zmax)) # all integers nrs`
343        """
344        extractVOI = vtk.vtkExtractVOI()
345        extractVOI.SetInputData(self.imagedata())
346
347        if VOI:
348            extractVOI.SetVOI(VOI)
349        else:
350            d = self.imagedata().GetDimensions()
351            bx0, bx1, by0, by1, bz0, bz1 = 0, d[0]-1, 0, d[1]-1, 0, d[2]-1
352            if left is not None:   bx0 = int((d[0]-1)*left)
353            if right is not None:  bx1 = int((d[0]-1)*(1-right))
354            if back is not None:   by0 = int((d[1]-1)*back)
355            if front is not None:  by1 = int((d[1]-1)*(1-front))
356            if bottom is not None: bz0 = int((d[2]-1)*bottom)
357            if top is not None:    bz1 = int((d[2]-1)*(1-top))
358            extractVOI.SetVOI(bx0, bx1, by0, by1, bz0, bz1)
359        extractVOI.Update()
360        self._update(extractVOI.GetOutput())
361
362        self.pipeline = utils.OperationNode(
363            "crop", parents=[self], c="#4cc9f0",
364            comment=f"dims={tuple(self.dimensions())}",
365        )
366        return self

Crop a Volume object.

Arguments:
  • left : (float) fraction to crop from the left plane (negative x)
  • right : (float) fraction to crop from the right plane (positive x)
  • back : (float) fraction to crop from the back plane (negative y)
  • front : (float) fraction to crop from the front plane (positive y)
  • bottom : (float) fraction to crop from the bottom plane (negative z)
  • top : (float) fraction to crop from the top plane (positive z)
  • VOI : (list) extract Volume Of Interest expressed in voxel numbers
Example:

vol.crop(VOI=(xmin, xmax, ymin, ymax, zmin, zmax)) # all integers nrs

def append(self, volumes, axis='z', preserve_extents=False):
368    def append(self, volumes, axis="z", preserve_extents=False):
369        """
370        Take the components from multiple inputs and merges them into one output.
371        Except for the append axis, all inputs must have the same extent.
372        All inputs must have the same number of scalar components.
373        The output has the same origin and spacing as the first input.
374        The origin and spacing of all other inputs are ignored.
375        All inputs must have the same scalar type.
376
377        Arguments:
378            axis : (int, str)
379                axis expanded to hold the multiple images
380            preserve_extents : (bool)
381                if True, the extent of the inputs is used to place
382                the image in the output. The whole extent of the output is the union of the input
383                whole extents. Any portion of the output not covered by the inputs is set to zero.
384                The origin and spacing is taken from the first input.
385
386        Example:
387            ```python
388            from vedo import Volume, dataurl
389            vol = Volume(dataurl+'embryo.tif')
390            vol.append(vol, axis='x').show().close()
391            ```
392            ![](https://vedo.embl.es/images/feats/volume_append.png)
393        """
394        ima = vtk.vtkImageAppend()
395        ima.SetInputData(self.imagedata())
396        if not utils.is_sequence(volumes):
397            volumes = [volumes]
398        for volume in volumes:
399            if isinstance(volume, vtk.vtkImageData):
400                ima.AddInputData(volume)
401            else:
402                ima.AddInputData(volume.imagedata())
403        ima.SetPreserveExtents(preserve_extents)
404        if axis == "x":
405            axis = 0
406        elif axis == "y":
407            axis = 1
408        elif axis == "z":
409            axis = 2
410        ima.SetAppendAxis(axis)
411        ima.Update()
412        self._update(ima.GetOutput())
413        
414        self.pipeline = utils.OperationNode(
415            "append", parents=[self, *volumes], c="#4cc9f0",
416            comment=f"dims={tuple(self.dimensions())}",
417        )
418        return self

Take the components from multiple inputs and merges them into one output. Except for the append axis, all inputs must have the same extent. All inputs must have the same number of scalar components. The output has the same origin and spacing as the first input. The origin and spacing of all other inputs are ignored. All inputs must have the same scalar type.

Arguments:
  • axis : (int, str) axis expanded to hold the multiple images
  • preserve_extents : (bool) if True, the extent of the inputs is used to place the image in the output. The whole extent of the output is the union of the input whole extents. Any portion of the output not covered by the inputs is set to zero. The origin and spacing is taken from the first input.
Example:
from vedo import Volume, dataurl
vol = Volume(dataurl+'embryo.tif')
vol.append(vol, axis='x').show().close()

def resize(self, *newdims):
420    def resize(self, *newdims):
421        """Increase or reduce the number of voxels of a Volume with interpolation."""
422        old_dims = np.array(self.imagedata().GetDimensions())
423        old_spac = np.array(self.imagedata().GetSpacing())
424        rsz = vtk.vtkImageResize()
425        rsz.SetResizeMethodToOutputDimensions()
426        rsz.SetInputData(self.imagedata())
427        rsz.SetOutputDimensions(newdims)
428        rsz.Update()
429        self._data = rsz.GetOutput()
430        new_spac = old_spac * old_dims / newdims  # keep aspect ratio
431        self._data.SetSpacing(new_spac)
432        self._update(self._data)
433        self.pipeline = utils.OperationNode(
434            "resize", parents=[self], c="#4cc9f0",
435            comment=f"dims={tuple(self.dimensions())}",
436        )
437        return self

Increase or reduce the number of voxels of a Volume with interpolation.

def normalize(self):
439    def normalize(self):
440        """Normalize that scalar components for each point."""
441        norm = vtk.vtkImageNormalize()
442        norm.SetInputData(self.imagedata())
443        norm.Update()
444        self._update(norm.GetOutput())
445        self.pipeline = utils.OperationNode("normalize", parents=[self], c="#4cc9f0")
446        return self

Normalize that scalar components for each point.

def mirror(self, axis='x'):
448    def mirror(self, axis="x"):
449        """
450        Mirror flip along one of the cartesian axes.
451        """
452        img = self.imagedata()
453
454        ff = vtk.vtkImageFlip()
455        ff.SetInputData(img)
456        if axis.lower() == "x":
457            ff.SetFilteredAxis(0)
458        elif axis.lower() == "y":
459            ff.SetFilteredAxis(1)
460        elif axis.lower() == "z":
461            ff.SetFilteredAxis(2)
462        else:
463            vedo.logger.error("mirror must be set to either x, y, z or n")
464            raise RuntimeError()
465        ff.Update()
466        self._update(ff.GetOutput())
467        self.pipeline = utils.OperationNode(f"mirror {axis}", parents=[self], c="#4cc9f0")
468        return self

Mirror flip along one of the cartesian axes.

def operation(self, operation, volume2=None):
470    def operation(self, operation, volume2=None):
471        """
472        Perform operations with `Volume` objects.
473        Keyword `volume2` can be a constant float.
474
475        Possible operations are:
476        ```
477        +, -, /, 1/x, sin, cos, exp, log,
478        abs, **2, sqrt, min, max, atan, atan2, median,
479        mag, dot, gradient, divergence, laplacian.
480        ```
481
482        Examples:
483            - [volume_operations.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/volume_operations.py)
484        """
485        op = operation.lower()
486        image1 = self._data
487
488        mf = None
489        if op in ["median"]:
490            mf = vtk.vtkImageMedian3D()
491            mf.SetInputData(image1)
492        elif op in ["mag"]:
493            mf = vtk.vtkImageMagnitude()
494            mf.SetInputData(image1)
495        elif op in ["dot", "dotproduct"]:
496            mf = vtk.vtkImageDotProduct()
497            mf.SetInput1Data(image1)
498            mf.SetInput2Data(volume2.imagedata())
499        elif op in ["grad", "gradient"]:
500            mf = vtk.vtkImageGradient()
501            mf.SetDimensionality(3)
502            mf.SetInputData(image1)
503        elif op in ["div", "divergence"]:
504            mf = vtk.vtkImageDivergence()
505            mf.SetInputData(image1)
506        elif op in ["laplacian"]:
507            mf = vtk.vtkImageLaplacian()
508            mf.SetDimensionality(3)
509            mf.SetInputData(image1)
510        
511        if mf is not None:
512            mf.Update()
513            vol = Volume(mf.GetOutput())
514            vol.pipeline = utils.OperationNode(
515                f"operation\n{op}", parents=[self], c="#4cc9f0",
516                shape="cylinder",
517            )
518            return vol ###########################
519
520        mat = vtk.vtkImageMathematics()
521        mat.SetInput1Data(image1)
522
523        K = None
524
525        if utils.is_number(volume2):
526            K = volume2
527            mat.SetConstantK(K)
528            mat.SetConstantC(K)
529
530        elif volume2 is not None:  # volume2 is another Volume object
531            mat.SetInput2Data(volume2.imagedata())
532
533        # ###########################
534        if op in ["+", "add", "plus"]:
535            if K:
536                mat.SetOperationToAddConstant()
537            else:
538                mat.SetOperationToAdd()
539
540        elif op in ["-", "subtract", "minus"]:
541            if K:
542                mat.SetConstantC(-float(K))
543                mat.SetOperationToAddConstant()
544            else:
545                mat.SetOperationToSubtract()
546
547        elif op in ["*", "multiply", "times"]:
548            if K:
549                mat.SetOperationToMultiplyByK()
550            else:
551                mat.SetOperationToMultiply()
552
553        elif op in ["/", "divide"]:
554            if K:
555                mat.SetConstantK(1.0 / K)
556                mat.SetOperationToMultiplyByK()
557            else:
558                mat.SetOperationToDivide()
559
560        elif op in ["1/x", "invert"]:
561            mat.SetOperationToInvert()
562        elif op in ["sin"]:
563            mat.SetOperationToSin()
564        elif op in ["cos"]:
565            mat.SetOperationToCos()
566        elif op in ["exp"]:
567            mat.SetOperationToExp()
568        elif op in ["log"]:
569            mat.SetOperationToLog()
570        elif op in ["abs"]:
571            mat.SetOperationToAbsoluteValue()
572        elif op in ["**2", "square"]:
573            mat.SetOperationToSquare()
574        elif op in ["sqrt", "sqr"]:
575            mat.SetOperationToSquareRoot()
576        elif op in ["min"]:
577            mat.SetOperationToMin()
578        elif op in ["max"]:
579            mat.SetOperationToMax()
580        elif op in ["atan"]:
581            mat.SetOperationToATAN()
582        elif op in ["atan2"]:
583            mat.SetOperationToATAN2()
584        else:
585            vedo.logger.error(f"unknown operation {operation}")
586            raise RuntimeError()
587        mat.Update()
588
589        self._update(mat.GetOutput())
590
591        self.pipeline = utils.OperationNode(
592            f"operation\n{op}", parents=[self, volume2],
593            shape="cylinder", c="#4cc9f0",
594        )
595        return self

Perform operations with Volume objects. Keyword volume2 can be a constant float.

Possible operations are:

+, -, /, 1/x, sin, cos, exp, log,
abs, **2, sqrt, min, max, atan, atan2, median,
mag, dot, gradient, divergence, laplacian.
Examples:
  • volume_operations.py (https://github.com/marcomusy/vedo/tree/master/examples/volumetric/volume_operations.py)
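
An illustrative sketch chaining a few operations (the constant value is arbitrary):

```python
from vedo import Volume, dataurl

vol = Volume(dataurl + "embryo.tif")
vol.operation("+", 10)           # add a constant to every voxel (modifies vol in place)
med = vol.operation("median")    # median filtering returns a new Volume instead
vol.operation("-", med)          # voxel-wise subtraction of another Volume (in place)
```
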
def frequency_pass_filter(self, low_cutoff=None, high_cutoff=None, order=1):
597    def frequency_pass_filter(self, low_cutoff=None, high_cutoff=None, order=1):
598        """
599        Low-pass and high-pass filtering become trivial in the frequency domain.
600        A portion of the pixels/voxels are simply masked or attenuated.
601        This function applies low-pass and/or high-pass Butterworth filters that
602        attenuate the frequency domain image.
603
604        The gradual attenuation of the filter is important.
605        A simple high-pass filter would simply mask a set of pixels in the frequency domain,
606        but the abrupt transition would cause a ringing effect in the spatial domain.
607
608        Arguments:
609            low_cutoff : (list)
610                the cutoff frequencies for x, y and z
611            high_cutoff : (list)
612                the cutoff frequencies for x, y and z
613            order : (int)
614                order determines sharpness of the cutoff curve
615        """
616        # https://lorensen.github.io/VTKExamples/site/Cxx/ImageProcessing/IdealHighPass
617        fft = vtk.vtkImageFFT()
618        fft.SetInputData(self._data)
619        fft.Update()
620        out = fft.GetOutput()
621
622        if high_cutoff:
623            blp = vtk.vtkImageButterworthLowPass()
624            blp.SetInputData(out)
625            blp.SetCutOff(high_cutoff)
626            blp.SetOrder(order)
627            blp.Update()
628            out = blp.GetOutput()
629
630        if low_cutoff:
631            bhp = vtk.vtkImageButterworthHighPass()
632            bhp.SetInputData(out)
633            bhp.SetCutOff(low_cutoff)
634            bhp.SetOrder(order)
635            bhp.Update()
636            out = bhp.GetOutput()
637
638        rfft = vtk.vtkImageRFFT()
639        rfft.SetInputData(out)
640        rfft.Update()
641
642        ecomp = vtk.vtkImageExtractComponents()
643        ecomp.SetInputData(rfft.GetOutput())
644        ecomp.SetComponents(0)
645        ecomp.Update()
646        self._update(ecomp.GetOutput())
647        self.pipeline = utils.OperationNode("frequency_pass_filter", parents=[self], c="#4cc9f0")
648        return self

Low-pass and high-pass filtering become trivial in the frequency domain. A portion of the pixels/voxels are simply masked or attenuated. This function applies low-pass and/or high-pass Butterworth filters that attenuate the frequency domain image.

The gradual attenuation of the filter is important. A simple high-pass filter would simply mask a set of pixels in the frequency domain, but the abrupt transition would cause a ringing effect in the spatial domain.

Arguments:
  • low_cutoff : (list) the cutoff frequencies for x, y and z
  • high_cutoff : (list) the cutoff frequencies for x, y and z
  • order : (int) order determines sharpness of the cutoff curve
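
A band-pass sketch; the cutoff values below are arbitrary and purely illustrative:

```python
from vedo import Volume, dataurl

vol = Volume(dataurl + "embryo.tif")
# attenuate frequencies below low_cutoff and above high_cutoff
vol.frequency_pass_filter(low_cutoff=(0.005, 0.005, 0.005),
                          high_cutoff=(0.1, 0.1, 0.1),
                          order=2)
```
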
def smooth_gaussian(self, sigma=(2, 2, 2), radius=None):
650    def smooth_gaussian(self, sigma=(2, 2, 2), radius=None):
651        """
652        Performs a convolution of the input Volume with a gaussian.
653
654        Arguments:
655            sigma : (float, list)
656                standard deviation(s) in voxel units.
657                A list can be given to smooth in the three directions differently.
658            radius : (float, list)
659                radius factor(s) determine how far out the gaussian
660                kernel will go before being clamped to zero. A list can be given too.
661        """
662        gsf = vtk.vtkImageGaussianSmooth()
663        gsf.SetDimensionality(3)
664        gsf.SetInputData(self.imagedata())
665        if utils.is_sequence(sigma):
666            gsf.SetStandardDeviations(sigma)
667        else:
668            gsf.SetStandardDeviation(sigma)
669        if radius is not None:
670            if utils.is_sequence(radius):
671                gsf.SetRadiusFactors(radius)
672            else:
673                gsf.SetRadiusFactor(radius)
674        gsf.Update()
675        self._update(gsf.GetOutput())
676        self.pipeline = utils.OperationNode("smooth_gaussian", parents=[self], c="#4cc9f0")
677        return self

Performs a convolution of the input Volume with a gaussian.

Arguments:
  • sigma : (float, list) standard deviation(s) in voxel units. A list can be given to smooth in the three directions differently.
  • radius : (float, list) radius factor(s) determine how far out the gaussian kernel will go before being clamped to zero. A list can be given too.
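
For example, an anisotropic smoothing sketch (the sigmas are illustrative):

```python
from vedo import Volume, dataurl

vol = Volume(dataurl + "embryo.tif")
vol.smooth_gaussian(sigma=(1, 1, 3))   # smooth more strongly along z than along x and y
```
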
def smooth_median(self, neighbours=(2, 2, 2)):
679    def smooth_median(self, neighbours=(2, 2, 2)):
680        """
681        Median filter that replaces each pixel with the median value
682        from a rectangular neighborhood around that pixel.
683        """
684        imgm = vtk.vtkImageMedian3D()
685        imgm.SetInputData(self.imagedata())
686        if utils.is_sequence(neighbours):
687            imgm.SetKernelSize(neighbours[0], neighbours[1], neighbours[2])
688        else:
689            imgm.SetKernelSize(neighbours, neighbours, neighbours)
690        imgm.Update()
691        self._update(imgm.GetOutput())
692        self.pipeline = utils.OperationNode("smooth_median", parents=[self], c="#4cc9f0")
693        return self

Median filter that replaces each pixel with the median value from a rectangular neighborhood around that pixel.
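
E.g., a quick denoising sketch with a 3x3x3 kernel:

```python
from vedo import Volume, dataurl

vol = Volume(dataurl + "embryo.tif")
vol.smooth_median(neighbours=(3, 3, 3))   # replaces each voxel with the local median
```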

def erode(self, neighbours=(2, 2, 2)):
695    def erode(self, neighbours=(2, 2, 2)):
696        """
697        Replace a voxel with the minimum over an ellipsoidal neighborhood of voxels.
698        If `neighbours` of an axis is 1, no processing is done on that axis.
699
700        Examples:
701            - [erode_dilate.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/erode_dilate.py)
702        
703                ![](https://vedo.embl.es/images/volumetric/erode_dilate.png)
704        """
705        ver = vtk.vtkImageContinuousErode3D()
706        ver.SetInputData(self._data)
707        ver.SetKernelSize(neighbours[0], neighbours[1], neighbours[2])
708        ver.Update()
709        self._update(ver.GetOutput())
710        self.pipeline = utils.OperationNode("erode", parents=[self], c="#4cc9f0")
711        return self

Replace a voxel with the minimum over an ellipsoidal neighborhood of voxels. If neighbours of an axis is 1, no processing is done on that axis.

Examples:
  • erode_dilate.py (https://github.com/marcomusy/vedo/tree/master/examples/volumetric/erode_dilate.py)
def dilate(self, neighbours=(2, 2, 2)):
713    def dilate(self, neighbours=(2, 2, 2)):
714        """
715        Replace a voxel with the maximum over an ellipsoidal neighborhood of voxels.
716        If `neighbours` of an axis is 1, no processing is done on that axis.
717
718        Examples:
719            - [erode_dilate.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/erode_dilate.py)
720        """
721        ver = vtk.vtkImageContinuousDilate3D()
722        ver.SetInputData(self._data)
723        ver.SetKernelSize(neighbours[0], neighbours[1], neighbours[2])
724        ver.Update()
725        self._update(ver.GetOutput())
726        self.pipeline = utils.OperationNode("dilate", parents=[self], c="#4cc9f0")
727        return self

Replace a voxel with the maximum over an ellipsoidal neighborhood of voxels. If neighbours of an axis is 1, no processing is done on that axis.

Examples:
  • erode_dilate.py (https://github.com/marcomusy/vedo/tree/master/examples/volumetric/erode_dilate.py)
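
A sketch comparing the two morphological filters side by side (the kernel size is illustrative):

```python
from vedo import Volume, dataurl, show

vol = Volume(dataurl + "embryo.tif")
eroded  = Volume(vol.imagedata()).erode(neighbours=(3, 3, 3))    # bright regions shrink
dilated = Volume(vol.imagedata()).dilate(neighbours=(3, 3, 3))   # bright regions grow
show(vol, eroded, dilated, N=3, axes=1).close()
```
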
def magnitude(self):
729    def magnitude(self):
730        """Collapses components with the magnitude function."""
731        imgm = vtk.vtkImageMagnitude()
732        imgm.SetInputData(self.imagedata())
733        imgm.Update()
734        self._update(imgm.GetOutput())
735        self.pipeline = utils.OperationNode("magnitude", parents=[self], c="#4cc9f0")
736        return self

Collapses components with the magnitude function.

def topoints(self):
738    def topoints(self):
739        """
740        Extract all image voxels as points.
741        This function takes an input `Volume` and creates a `Points` object
742        that contains the points and the point attributes.
743
744        Examples:
745            - [vol2points.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/vol2points.py)
746        """
747        v2p = vtk.vtkImageToPoints()
748        v2p.SetInputData(self.imagedata())
749        v2p.Update()
750        mpts = vedo.Points(v2p.GetOutput())
751        mpts.pipeline = utils.OperationNode("topoints", parents=[self], c="#4cc9f0:#e9c46a")
752        return mpts

Extract all image voxels as points. This function takes an input Volume and creates a Points object that contains the points and the point attributes.

Examples:
  • vol2points.py (https://github.com/marcomusy/vedo/tree/master/examples/volumetric/vol2points.py)
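
A small sketch on a synthetic volume:

```python
import numpy as np
from vedo import Volume

vol = Volume(np.random.rand(20, 20, 20))   # 20x20x20 random scalar field
pts = vol.topoints()                       # a vedo.Points object carrying the voxel values
print(pts.points().shape)                  # -> (8000, 3)
```
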
def euclidean_distance(self, anisotropy=False, max_distance=None):
754    def euclidean_distance(self, anisotropy=False, max_distance=None):
755        """
756        Implementation of the Euclidean DT (Distance Transform) using Saito's algorithm.
757        The distance map produced contains the square of the Euclidean distance values.
758        The algorithm has an O(n^(D+1)) complexity over n x n x...x n images in D dimensions.
759
760        Check out also: https://en.wikipedia.org/wiki/Distance_transform
761
762        Arguments:
763            anisotropy : (bool)
764                used to define whether Spacing should be used in the
765                computation of the distances.
766            max_distance : (float)
767                any distance bigger than max_distance will not be
768                computed but set to this specified value instead.
769
770        Examples:
771            - [euclidian_dist.py](examples/volumetric/euclidian_dist.py)
772        """
773        euv = vtk.vtkImageEuclideanDistance()
774        euv.SetInputData(self._data)
775        euv.SetConsiderAnisotropy(anisotropy)
776        if max_distance is not None:
777            euv.InitializeOn()
778            euv.SetMaximumDistance(max_distance)
779        euv.SetAlgorithmToSaito()
780        euv.Update()
781        vol = Volume(euv.GetOutput())
782        vol.pipeline = utils.OperationNode("euclidean_distance", parents=[self], c="#4cc9f0")
783        return vol

Implementation of the Euclidean DT (Distance Transform) using Saito's algorithm. The distance map produced contains the square of the Euclidean distance values. The algorithm has an O(n^(D+1)) complexity over n x n x...x n images in D dimensions.

Check out also: https://en.wikipedia.org/wiki/Distance_transform

Arguments:
  • anisotropy : (bool) whether the voxel spacing should be used in the computation of the distances.
  • max_distance : (float) any distance bigger than max_distance will not be computed but set to this specified value instead.
Examples:
  • euclidian_dist.py (examples/volumetric/euclidian_dist.py)
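
An illustrative sketch on a small synthetic binary volume:

```python
import numpy as np
from vedo import Volume

data = np.zeros((50, 50, 50))
data[20:30, 20:30, 20:30] = 1              # a bright cube inside an empty volume
vol = Volume(data)
dist = vol.euclidean_distance()            # new Volume holding squared Euclidean distances
print(dist.imagedata().GetScalarRange())
```
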
def correlation_with(self, vol2, dim=2):
785    def correlation_with(self, vol2, dim=2):
786        """
787        Find the correlation between two volumetric data sets.
788        Keyword `dim` determines whether the correlation will be 3D, 2D or 1D.
789        The default is a 2D Correlation.
790
791        The output size will match the size of the first input.
792        The second input is considered the correlation kernel.
793        """
794        imc = vtk.vtkImageCorrelation()
795        imc.SetInput1Data(self._data)
796        imc.SetInput2Data(vol2.imagedata())
797        imc.SetDimensionality(dim)
798        imc.Update()
799        vol = Volume(imc.GetOutput())
800
801        vol.pipeline = utils.OperationNode(
802            "correlation_with", parents=[self, vol2], c="#4cc9f0",
803        )
804        return vol

Find the correlation between two volumetric data sets. Keyword dim determines whether the correlation will be 3D, 2D or 1D. The default is a 2D Correlation.

The output size will match the size of the first input. The second input is considered the correlation kernel.
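
A minimal sketch on two small synthetic volumes (kept small on purpose, since a full 3D correlation is computationally expensive):

```python
import numpy as np
from vedo import Volume

rng = np.random.default_rng(0)
vol1 = Volume(rng.random((15, 15, 15)).astype(np.float32))
vol2 = Volume(rng.random((15, 15, 15)).astype(np.float32))   # acts as the correlation kernel
corr = vol1.correlation_with(vol2, dim=3)                    # fully 3D correlation
print(corr.imagedata().GetDimensions())
```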

def scale_voxels(self, scale=1):
806    def scale_voxels(self, scale=1):
807        """Scale the voxel content by factor `scale`."""
808        rsl = vtk.vtkImageReslice()
809        rsl.SetInputData(self.imagedata())
810        rsl.SetScalarScale(scale)
811        rsl.Update()
812        self._update(rsl.GetOutput())
813        self.pipeline = utils.OperationNode(
814            f"scale_voxels\nscale={scale}", parents=[self], c="#4cc9f0")
815        return self

Scale the voxel content by factor scale.
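
For example (note that the voxel data type is typically preserved, so scaling an integer volume truncates the result):

```python
from vedo import Volume, dataurl

vol = Volume(dataurl + "embryo.tif")
print(vol.imagedata().GetScalarRange())
vol.scale_voxels(0.5)                      # halve every voxel value
print(vol.imagedata().GetScalarRange())
```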

class Volume(BaseVolume, vedo.base.BaseGrid, vtkmodules.vtkRenderingCore.vtkVolume):
 819class Volume(BaseVolume, BaseGrid, vtk.vtkVolume):
 820    """
 821    Class to describe datasets that are defined on "voxels":
 822    the 3D equivalent of 2D pixels.
 823    """
 824    def __init__(
 825        self,
 826        inputobj=None,
 827        c="RdBu_r",
 828        alpha=(0.0, 0.0, 0.2, 0.4, 0.8, 1.0),
 829        alpha_gradient=None,
 830        alpha_unit=1,
 831        mode=0,
 832        spacing=None,
 833        dims=None,
 834        origin=None,
 835        mapper="smart",
 836    ):
 837        """
 838        This class can be initialized with a numpy object, a `vtkImageData`
 839        or a list of 2D bmp files.
 840
 841        Arguments:
 842            c : (list, str)
 843                sets colors along the scalar range, or a matplotlib color map name
 844            alpha : (float, list)
 845                sets transparencies along the scalar range
 846            alpha_unit : (float)
 847                low values make composite rendering look brighter and denser
 848            origin : (list)
 849                set volume origin coordinates
 850            spacing : (list)
 851                voxel dimensions in x, y and z.
 852            dims : (list)
 853                specify the dimensions of the volume.
 854            mapper : (str)
 855                either 'gpu', 'opengl_gpu', 'fixed' or 'smart'
 856            mode : (int)
 857                define the volumetric rendering style:
 858                    - 0, composite rendering
 859                    - 1, maximum projection
 860                    - 2, minimum projection
 861                    - 3, average projection
 862                    - 4, additive mode
 863
 864                <br>The default mode is "composite" where the scalar values are sampled through
 865                the volume and composited in a front-to-back scheme through alpha blending.
 866                The final color and opacity is determined using the color and opacity transfer
 867                functions specified in alpha keyword.
 868
 869                Maximum and minimum intensity blend modes use the maximum and minimum
 870                scalar values, respectively, along the sampling ray.
 871                The final color and opacity is determined by passing the resultant value
 872                through the color and opacity transfer functions.
 873
 874                Additive blend mode accumulates scalar values by passing each value
 875                through the opacity transfer function and then adding up the product
 876                of the value and its opacity. In other words, the scalar values are scaled
 877                using the opacity transfer function and summed to derive the final color.
 878                Note that the resulting image is always grayscale i.e. aggregated values
 879                are not passed through the color transfer function.
 880                This is because the final value is a derived value and not a real data value
 881                along the sampling ray.
 882
 883                Average intensity blend mode works similar to the additive blend mode where
 884                the scalar values are multiplied by opacity calculated from the opacity
 885                transfer function and then added.
 886                The additional step here is to divide the sum by the number of samples
 887                taken through the volume.
 888                As is the case with the additive intensity projection, the final image will
 889                always be grayscale i.e. the aggregated values are not passed through the
 890                color transfer function.
 891
 892        Example:
 893            ```python
 894            from vedo import Volume
 895            vol = Volume("path/to/mydata/rec*.bmp")
 896            vol.show()
 897            ```
 898
 899        Examples:
 900            - [numpy2volume1.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/numpy2volume1.py)
 901
 902                ![](https://vedo.embl.es/images/volumetric/numpy2volume1.png)
 903
 904            - [read_volume2.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/read_volume2.py)
 905
 906                ![](https://vedo.embl.es/images/volumetric/read_volume2.png)
 907
 908        .. note:: 
 909            if a `list` of values is used for `alpha` this is interpreted
 910            as a transfer function along the range of the scalar.
 911        """
 912        vtk.vtkVolume.__init__(self)
 913        BaseGrid.__init__(self)
 914        BaseVolume.__init__(self)
 915        # super().__init__()
 916        
 917        ###################
 918        if isinstance(inputobj, str):
 919
 920            if "https://" in inputobj:
 921                inputobj = vedo.io.download(inputobj, verbose=False)  # fpath
 922            elif os.path.isfile(inputobj):
 923                pass
 924            else:
 925                inputobj = sorted(glob.glob(inputobj))
 926
 927        ###################
 928        if "gpu" in mapper:
 929            self._mapper = vtk.vtkGPUVolumeRayCastMapper()
 930        elif "opengl_gpu" in mapper:
 931            self._mapper = vtk.vtkOpenGLGPUVolumeRayCastMapper()
 932        elif "smart" in mapper:
 933            self._mapper = vtk.vtkSmartVolumeMapper()
 934        elif "fixed" in mapper:
 935            self._mapper = vtk.vtkFixedPointVolumeRayCastMapper()
 936        elif isinstance(mapper, vtk.vtkMapper):
 937            self._mapper = mapper
 938        else:
 939            print("Error unknown mapper type", [mapper])
 940            raise RuntimeError()
 941        self.SetMapper(self._mapper)
 942
 943        ###################
 944        inputtype = str(type(inputobj))
 945
 946        # print('Volume inputtype', inputtype, c='b')
 947
 948        if inputobj is None:
 949            img = vtk.vtkImageData()
 950
 951        elif utils.is_sequence(inputobj):
 952
 953            if isinstance(inputobj[0], str) and ".bmp" in inputobj[0].lower():
 954                # scan sequence of BMP files
 955                ima = vtk.vtkImageAppend()
 956                ima.SetAppendAxis(2)
 957                pb = utils.ProgressBar(0, len(inputobj))
 958                for i in pb.range():
 959                    f = inputobj[i]
 960                    if "_rec_spr.bmp" in f:
 961                        continue
 962                    picr = vtk.vtkBMPReader()
 963                    picr.SetFileName(f)
 964                    picr.Update()
 965                    mgf = vtk.vtkImageMagnitude()
 966                    mgf.SetInputData(picr.GetOutput())
 967                    mgf.Update()
 968                    ima.AddInputData(mgf.GetOutput())
 969                    pb.print("loading...")
 970                ima.Update()
 971                img = ima.GetOutput()
 972
 973            else:
 974
 975                if len(inputobj.shape) == 1:
 976                    varr = utils.numpy2vtk(inputobj)
 977                else:
 978                    varr = utils.numpy2vtk(inputobj.ravel(order="F"))
 979                varr.SetName("input_scalars")
 980
 981                img = vtk.vtkImageData()
 982                if dims is not None:
 983                    img.SetDimensions(dims[2],dims[1],dims[0])
 984                else:
 985                    if len(inputobj.shape)==1:
 986                        vedo.logger.error("must set dimensions (dims keyword) in Volume")
 987                        raise RuntimeError()
 988                    img.SetDimensions(inputobj.shape)
 989                img.GetPointData().AddArray(varr)
 990                img.GetPointData().SetActiveScalars(varr.GetName())
 991
 992        elif "ImageData" in inputtype:
 993            img = inputobj
 994
 995        elif isinstance(inputobj, Volume):
 996            img = inputobj.inputdata()
 997
 998        elif "UniformGrid" in inputtype:
 999            img = inputobj
1000
1001        elif hasattr(inputobj, "GetOutput"):  # passing vtk object, try to extract imagedata
1002            if hasattr(inputobj, "Update"):
1003                inputobj.Update()
1004            img = inputobj.GetOutput()
1005
1006        elif isinstance(inputobj, str):
1007            if "https://" in inputobj:
1008                inputobj = vedo.io.download(inputobj, verbose=False)
1009            img = vedo.io.loadImageData(inputobj)
1010
1011        else:
1012            vedo.logger.error(f"cannot understand input type {inputtype}")
1013            return
1014
1015        if dims is not None:
1016            img.SetDimensions(dims)
1017
1018        if origin is not None:
1019            img.SetOrigin(origin)  ### DIFFERENT from volume.origin()!
1020
1021        if spacing is not None:
1022            img.SetSpacing(spacing)
1023
1024        self._data = img
1025        self._mapper.SetInputData(img)
1026
1027        if img.GetPointData().GetScalars():
1028            if img.GetPointData().GetScalars().GetNumberOfComponents() == 1:
1029                self.mode(mode).color(c).alpha(alpha).alpha_gradient(alpha_gradient)
1030                self.GetProperty().SetShade(True)
1031                self.GetProperty().SetInterpolationType(1)
1032                self.GetProperty().SetScalarOpacityUnitDistance(alpha_unit)
1033
1034        # remember stuff:
1035        self._mode = mode
1036        self._color = c
1037        self._alpha = alpha
1038        self._alpha_grad = alpha_gradient
1039        self._alpha_unit = alpha_unit
1040        
1041        self.pipeline = utils.OperationNode("Volume",
1042            comment=f"dims={tuple(self.dimensions())}", c="#4cc9f0",
1043        )
1044        #######################################################################
1045
1046
1047    def _update(self, data):
1048        self._data = data
1049        self._data.GetPointData().Modified()
1050        self._mapper.SetInputData(data)
1051        self._mapper.Modified()
1052        self._mapper.Update()
1053        return self
1054
1055    def mode(self, mode=None):
1056        """
1057        Define the volumetric rendering mode following this:
1058            - 0, composite rendering
1059            - 1, maximum projection rendering
1060            - 2, minimum projection rendering
1061            - 3, average projection rendering
1062            - 4, additive mode
1063
1064        The default mode is "composite" where the scalar values are sampled through
1065        the volume and composited in a front-to-back scheme through alpha blending.
1066        The final color and opacity is determined using the color and opacity transfer
1067        functions specified in alpha keyword.
1068
1069        Maximum and minimum intensity blend modes use the maximum and minimum
1070        scalar values, respectively, along the sampling ray.
1071        The final color and opacity is determined by passing the resultant value
1072        through the color and opacity transfer functions.
1073
1074        Additive blend mode accumulates scalar values by passing each value
1075        through the opacity transfer function and then adding up the product
1076        of the value and its opacity. In other words, the scalar values are scaled
1077        using the opacity transfer function and summed to derive the final color.
1078        Note that the resulting image is always grayscale i.e. aggregated values
1079        are not passed through the color transfer function.
1080        This is because the final value is a derived value and not a real data value
1081        along the sampling ray.
1082
1083        Average intensity blend mode works similar to the additive blend mode where
1084        the scalar values are multiplied by opacity calculated from the opacity
1085        transfer function and then added.
1086        The additional step here is to divide the sum by the number of samples
1087        taken through the volume.
1088        As is the case with the additive intensity projection, the final image will
1089        always be grayscale i.e. the aggregated values are not passed through the
1090        color transfer function.
1091        """
1092        if mode is None:
1093            return self._mapper.GetBlendMode()
1094
1095        if isinstance(mode, str):
1096            if "comp" in mode:
1097                mode = 0
1098            elif "proj" in mode:
1099                if "max" in mode:
1100                    mode = 1
1101                elif "min" in mode:
1102                    mode = 2
1103                elif "ave" in mode:
1104                    mode = 3
1105                else:
1106                    vedo.logger.warning(f"unknown mode {mode}")
1107                    mode = 0
1108            elif "add" in mode:
1109                mode = 4
1110            else:
1111                vedo.logger.warning(f"unknown mode {mode}")
1112                mode = 0
1113
1114        self._mapper.SetBlendMode(mode)
1115        self._mode = mode
1116        return self
1117
1118    def shade(self, status=None):
1119        """
1120        Set/Get the shading of a Volume.
1121        Shading can be further controlled with `volume.lighting()` method.
1122
1123        If shading is turned on, the mapper may perform shading calculations.
1124        In some cases shading does not apply
1125        (for example, in maximum intensity projection mode).
1126        """
1127        if status is None:
1128            return self.GetProperty().GetShade()
1129        self.GetProperty().SetShade(status)
1130        return self
1131
1132    def cmap(self, c, alpha=None, vmin=None, vmax=None):
1133        """Same as `color()`.
1134
1135        Arguments:
1136            alpha : (list)
1137                use a list to specify transparencies along the scalar range
1138            vmin : (float)
1139                force the min of the scalar range to be this value
1140            vmax : (float)
1141                force the max of the scalar range to be this value
1142        """
1143        return self.color(c, alpha, vmin, vmax)
1144
1145    def jittering(self, status=None):
1146        """
1147        If `True`, each ray traversal direction will be perturbed slightly
1148        using a noise-texture to get rid of wood-grain effects.
1149        """
1150        if hasattr(self._mapper, "SetUseJittering"):  # tetmesh doesnt have it
1151            if status is None:
1152                return self._mapper.GetUseJittering()
1153            self._mapper.SetUseJittering(status)
1154        return self
1155
1156    def mask(self, data):
1157        """
1158        Mask a volume visualization with a binary value.
1159        Needs to specify keyword mapper='gpu'.
1160
1161        Example:
1162        ```python
1163            from vedo import np, Volume, show
1164            data_matrix = np.zeros([75, 75, 75], dtype=np.uint8)
1165            # all voxels have value zero except:
1166            data_matrix[0:35,   0:35,  0:35] = 1
1167            data_matrix[35:55, 35:55, 35:55] = 2
1168            data_matrix[55:74, 55:74, 55:74] = 3
1169            vol = Volume(data_matrix, c=['white','b','g','r'], mapper='gpu')
1170            data_mask = np.zeros_like(data_matrix)
1171            data_mask[10:65, 10:45, 20:75] = 1
1172            vol.mask(data_mask)
1173            show(vol, axes=1).close()
1174        ```
1175        """
1176        mask = Volume(data.astype(np.uint8))
1177        try:
1178            self.mapper().SetMaskTypeToBinary()
1179            self.mapper().SetMaskInput(mask.inputdata())
1180        except AttributeError:
1181            vedo.logger.error("volume.mask() must create the volume with Volume(..., mapper='gpu')")
1182        return self
1183
1184    def alpha_gradient(self, alpha_grad, vmin=None, vmax=None):
1185        """
1186        Assign a set of transparencies to a volume's gradient
1187        along the range of the scalar value.
1188        A single constant value can also be assigned.
1189        The gradient function is used to decrease the opacity
1190        in the "flat" regions of the volume while maintaining the opacity
1191        at the boundaries between material types.  The gradient is measured
1192        as the amount by which the intensity changes over unit distance.
1193
1194        The format for alpha_grad is the same as for method `volume.alpha()`.
1195        """
1196        if vmin is None:
1197            vmin, _ = self._data.GetScalarRange()
1198        if vmax is None:
1199            _, vmax = self._data.GetScalarRange()
1200        self._alpha_grad = alpha_grad
1201        volumeProperty = self.GetProperty()
1202
1203        if alpha_grad is None:
1204            volumeProperty.DisableGradientOpacityOn()
1205            return self
1206
1207        volumeProperty.DisableGradientOpacityOff()
1208
1209        gotf = volumeProperty.GetGradientOpacity()
1210        if utils.is_sequence(alpha_grad):
1211            alpha_grad = np.array(alpha_grad)
1212            if len(alpha_grad.shape)==1: # user passing a flat list e.g. (0.0, 0.3, 0.9, 1)
1213                for i, al in enumerate(alpha_grad):
1214                    xalpha = vmin + (vmax - vmin) * i / (len(alpha_grad) - 1)
1215                    # Create transfer mapping scalar value to gradient opacity
1216                    gotf.AddPoint(xalpha, al)
1217            elif len(alpha_grad.shape) == 2:  # user passing [(x0,alpha0), ...]
1218                gotf.AddPoint(vmin, alpha_grad[0][1])
1219                for xalpha, al in alpha_grad:
1220                    # Create transfer mapping scalar value to opacity
1221                    gotf.AddPoint(xalpha, al)
1222                gotf.AddPoint(vmax, alpha_grad[-1][1])
1223            # print("alpha_grad at", round(xalpha, 1), "\tset to", al)
1224        else:
1225            gotf.AddPoint(vmin, alpha_grad)  # constant alpha_grad
1226            gotf.AddPoint(vmax, alpha_grad)
1227        return self
1228
1229    def component_weight(self, i, weight):
1230        """Set the scalar component weight in range [0,1]."""
1231        self.GetProperty().SetComponentWeight(i, weight)
1232        return self
1233
1234    def xslice(self, i):
1235        """Extract the slice at index `i` of volume along x-axis."""
1236        vslice = vtk.vtkImageDataGeometryFilter()
1237        vslice.SetInputData(self.imagedata())
1238        nx, ny, nz = self.imagedata().GetDimensions()
1239        if i > nx - 1:
1240            i = nx - 1
1241        vslice.SetExtent(i, i, 0, ny, 0, nz)
1242        vslice.Update()
1243        m = Mesh(vslice.GetOutput())
1244        m.pipeline = utils.OperationNode(f"xslice {i}", parents=[self], c="#4cc9f0:#e9c46a")
1245        return m
1246
1247    def yslice(self, j):
1248        """Extract the slice at index `j` of volume along y-axis."""
1249        vslice = vtk.vtkImageDataGeometryFilter()
1250        vslice.SetInputData(self.imagedata())
1251        nx, ny, nz = self.imagedata().GetDimensions()
1252        if j > ny - 1:
1253            j = ny - 1
1254        vslice.SetExtent(0, nx, j, j, 0, nz)
1255        vslice.Update()
1256        m = Mesh(vslice.GetOutput())
1257        m.pipeline = utils.OperationNode(f"yslice {j}", parents=[self], c="#4cc9f0:#e9c46a")
1258        return m
1259
1260    def zslice(self, k):
1261        """Extract the slice at index `k` of volume along z-axis."""
1262        vslice = vtk.vtkImageDataGeometryFilter()
1263        vslice.SetInputData(self.imagedata())
1264        nx, ny, nz = self.imagedata().GetDimensions()
1265        if k > nz - 1:
1266            k = nz - 1
1267        vslice.SetExtent(0, nx, 0, ny, k, k)
1268        vslice.Update()
1269        m = Mesh(vslice.GetOutput())
1270        m.pipeline = utils.OperationNode(f"zslice {k}", parents=[self], c="#4cc9f0:#e9c46a")
1271        return m
1272
1273    @deprecated(reason=vedo.colors.red + "Please use slice_plane()" + vedo.colors.reset)
1274    def slicePlane(self, *a, **b):
1275        "Deprecated. Please use `slice_plane()`"
1276        return self.slice_plane(*a, **b)
1277
1278    def slice_plane(self, origin=(0, 0, 0), normal=(1, 1, 1), autocrop=False):
1279        """
1280        Extract the slice along a given plane position and normal.
1281
1282        Example:
1283            - [slicePlane1.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/slicePlane1.py)
1284
1285                ![](https://vedo.embl.es/images/volumetric/slicePlane1.gif)
1286        """
1287        reslice = vtk.vtkImageReslice()
1288        reslice.SetInputData(self._data)
1289        reslice.SetOutputDimensionality(2)
1290        newaxis = utils.versor(normal)
1291        pos = np.array(origin)
1292        initaxis = (0, 0, 1)
1293        crossvec = np.cross(initaxis, newaxis)
1294        angle = np.arccos(np.dot(initaxis, newaxis))
1295        T = vtk.vtkTransform()
1296        T.PostMultiply()
1297        T.RotateWXYZ(np.rad2deg(angle), crossvec)
1298        T.Translate(pos)
1299        M = T.GetMatrix()
1300        reslice.SetResliceAxes(M)
1301        reslice.SetInterpolationModeToLinear()
1302        reslice.SetAutoCropOutput(not autocrop)
1303        reslice.Update()
1304        vslice = vtk.vtkImageDataGeometryFilter()
1305        vslice.SetInputData(reslice.GetOutput())
1306        vslice.Update()
1307        msh = Mesh(vslice.GetOutput())
1308        msh.SetOrientation(T.GetOrientation())
1309        msh.SetPosition(pos)
1310        msh.pipeline = utils.OperationNode(
1311            "slice_plane", parents=[self], c="#4cc9f0:#e9c46a")
1312        return msh
1313
1314    def warp(self, source, target, sigma=1, mode="3d", fit=False):
1315        """
1316        Warp volume scalars within a Volume by specifying
1317        source and target sets of points.
1318
1319        Arguments:
1320            source : (Points, list)
1321                the list of source points
1322            target : (Points, list)
1323                the list of target points
1324            fit : (bool)
1325                fit/adapt the old bounding box to the warped geometry
1326        """
1327        if isinstance(source, vedo.Points):
1328            source = source.points()
1329        if isinstance(target, vedo.Points):
1330            target = target.points()
1331
1332        ns = len(source)
1333        ptsou = vtk.vtkPoints()
1334        ptsou.SetNumberOfPoints(ns)
1335        for i in range(ns):
1336            ptsou.SetPoint(i, source[i])
1337
1338        nt = len(target)
1339        if ns != nt:
1340            vedo.logger.error(f"#source {ns} != {nt} #target points")
1341            raise RuntimeError()
1342
1343        pttar = vtk.vtkPoints()
1344        pttar.SetNumberOfPoints(nt)
1345        for i in range(ns):
1346            pttar.SetPoint(i, target[i])
1347
1348        T = vtk.vtkThinPlateSplineTransform()
1349        if mode.lower() == "3d":
1350            T.SetBasisToR()
1351        elif mode.lower() == "2d":
1352            T.SetBasisToR2LogR()
1353        else:
1354            vedo.logger.error(f"unknown mode {mode}")
1355            raise RuntimeError()
1356
1357        T.SetSigma(sigma)
1358        T.SetSourceLandmarks(ptsou)
1359        T.SetTargetLandmarks(pttar)
1360        T.Inverse()
1361        self.transform = T
1362        self.apply_transform(T, fit=fit)
1363        self.pipeline = utils.OperationNode("warp", parents=[self], c="#4cc9f0")
1364        return self
1365
1366    def apply_transform(self, T, fit=False):
1367        """
1368        Apply a transform to the scalars in the volume.
1369
1370        Arguments:
1371            T : (vtkTransform, matrix)
1372                The transformation to be applied
1373            fit : (bool)
1374                fit/adapt the old bounding box to the warped geometry
1375        """
1376        if isinstance(T, vtk.vtkMatrix4x4):
1377            tr = vtk.vtkTransform()
1378            tr.SetMatrix(T)
1379            T = tr
1380
1381        elif utils.is_sequence(T):
1382            M = vtk.vtkMatrix4x4()
1383            n = len(T[0])
1384            for i in range(n):
1385                for j in range(n):
1386                    M.SetElement(i, j, T[i][j])
1387            tr = vtk.vtkTransform()
1388            tr.SetMatrix(M)
1389            T = tr
1390
1391        reslice = vtk.vtkImageReslice()
1392        reslice.SetInputData(self._data)
1393        reslice.SetResliceTransform(T)
1394        reslice.SetOutputDimensionality(3)
1395        reslice.SetInterpolationModeToLinear()
1396
1397        spacing = self._data.GetSpacing()
1398        origin = self._data.GetOrigin()
1399
1400        if fit:
1401            bb = self.box()
1402            if isinstance(T, vtk.vtkThinPlateSplineTransform):
1403                TI = vtk.vtkThinPlateSplineTransform()
1404                TI.DeepCopy(T)
1405                TI.Inverse()
1406            else:
1407                TI = vtk.vtkTransform()
1408                TI.DeepCopy(T)
1409            bb.apply_transform(TI)
1410            bounds = bb.GetBounds()
1411            bounds = (
1412                bounds[0]/spacing[0], bounds[1]/spacing[0],
1413                bounds[2]/spacing[1], bounds[3]/spacing[1],
1414                bounds[4]/spacing[2], bounds[5]/spacing[2],
1415            )
1416            bounds = np.round(bounds).astype(int)
1417            reslice.SetOutputExtent(bounds)
1418            reslice.SetOutputSpacing(spacing[0], spacing[1], spacing[2])
1419            reslice.SetOutputOrigin(origin[0], origin[1], origin[2])
1420
1421        reslice.Update()
1422        self._update(reslice.GetOutput())
1423        self.pipeline = utils.OperationNode("apply_transform", parents=[self], c="#4cc9f0")
1424        return self

Class to describe datasets that are defined on "voxels": the 3D equivalent of 2D pixels.

Volume( inputobj=None, c='RdBu_r', alpha=(0.0, 0.0, 0.2, 0.4, 0.8, 1.0), alpha_gradient=None, alpha_unit=1, mode=0, spacing=None, dims=None, origin=None, mapper='smart')
 824    def __init__(
 825        self,
 826        inputobj=None,
 827        c="RdBu_r",
 828        alpha=(0.0, 0.0, 0.2, 0.4, 0.8, 1.0),
 829        alpha_gradient=None,
 830        alpha_unit=1,
 831        mode=0,
 832        spacing=None,
 833        dims=None,
 834        origin=None,
 835        mapper="smart",
 836    ):
 837        """
 838        This class can be initialized with a numpy object, a `vtkImageData`
 839        or a list of 2D bmp files.
 840
 841        Arguments:
 842            c : (list, str)
 843                sets colors along the scalar range, or a matplotlib color map name
 844            alpha : (float, list)
 845                sets transparencies along the scalar range
 846            alpha_unit : (float)
 847                low values make composite rendering look brighter and denser
 848            origin : (list)
 849                set volume origin coordinates
 850            spacing : (list)
 851                voxel dimensions in x, y and z.
 852            dims : (list)
 853                specify the dimensions of the volume.
 854            mapper : (str)
 855                either 'gpu', 'opengl_gpu', 'fixed' or 'smart'
 856            mode : (int)
 857                define the volumetric rendering style:
 858                    - 0, composite rendering
 859                    - 1, maximum projection
 860                    - 2, minimum projection
 861                    - 3, average projection
 862                    - 4, additive mode
 863
 864                <br>The default mode is "composite" where the scalar values are sampled through
 865                the volume and composited in a front-to-back scheme through alpha blending.
 866                The final color and opacity is determined using the color and opacity transfer
 867                functions specified in alpha keyword.
 868
 869                Maximum and minimum intensity blend modes use the maximum and minimum
 870                scalar values, respectively, along the sampling ray.
 871                The final color and opacity is determined by passing the resultant value
 872                through the color and opacity transfer functions.
 873
 874                Additive blend mode accumulates scalar values by passing each value
 875                through the opacity transfer function and then adding up the product
 876                of the value and its opacity. In other words, the scalar values are scaled
 877                using the opacity transfer function and summed to derive the final color.
 878                Note that the resulting image is always grayscale i.e. aggregated values
 879                are not passed through the color transfer function.
 880                This is because the final value is a derived value and not a real data value
 881                along the sampling ray.
 882
 883                Average intensity blend mode works similar to the additive blend mode where
 884                the scalar values are multiplied by opacity calculated from the opacity
 885                transfer function and then added.
 886                The additional step here is to divide the sum by the number of samples
 887                taken through the volume.
 888                As is the case with the additive intensity projection, the final image will
 889                always be grayscale i.e. the aggregated values are not passed through the
 890                color transfer function.
 891
 892        Example:
 893            ```python
 894            from vedo import Volume
 895            vol = Volume("path/to/mydata/rec*.bmp")
 896            vol.show()
 897            ```
 898
 899        Examples:
 900            - [numpy2volume1.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/numpy2volume1.py)
 901
 902                ![](https://vedo.embl.es/images/volumetric/numpy2volume1.png)
 903
 904            - [read_volume2.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/read_volume2.py)
 905
 906                ![](https://vedo.embl.es/images/volumetric/read_volume2.png)
 907
 908        .. note:: 
 909            if a `list` of values is used for `alpha` this is interpreted
 910            as a transfer function along the range of the scalar.
 911        """
 912        vtk.vtkVolume.__init__(self)
 913        BaseGrid.__init__(self)
 914        BaseVolume.__init__(self)
 915        # super().__init__()
 916        
 917        ###################
 918        if isinstance(inputobj, str):
 919
 920            if "https://" in inputobj:
 921                inputobj = vedo.io.download(inputobj, verbose=False)  # fpath
 922            elif os.path.isfile(inputobj):
 923                pass
 924            else:
 925                inputobj = sorted(glob.glob(inputobj))
 926
 927        ###################
 928        if "gpu" in mapper:
 929            self._mapper = vtk.vtkGPUVolumeRayCastMapper()
 930        elif "opengl_gpu" in mapper:
 931            self._mapper = vtk.vtkOpenGLGPUVolumeRayCastMapper()
 932        elif "smart" in mapper:
 933            self._mapper = vtk.vtkSmartVolumeMapper()
 934        elif "fixed" in mapper:
 935            self._mapper = vtk.vtkFixedPointVolumeRayCastMapper()
 936        elif isinstance(mapper, vtk.vtkMapper):
 937            self._mapper = mapper
 938        else:
 939            print("Error unknown mapper type", [mapper])
 940            raise RuntimeError()
 941        self.SetMapper(self._mapper)
 942
 943        ###################
 944        inputtype = str(type(inputobj))
 945
 946        # print('Volume inputtype', inputtype, c='b')
 947
 948        if inputobj is None:
 949            img = vtk.vtkImageData()
 950
 951        elif utils.is_sequence(inputobj):
 952
 953            if isinstance(inputobj[0], str) and ".bmp" in inputobj[0].lower():
 954                # scan sequence of BMP files
 955                ima = vtk.vtkImageAppend()
 956                ima.SetAppendAxis(2)
 957                pb = utils.ProgressBar(0, len(inputobj))
 958                for i in pb.range():
 959                    f = inputobj[i]
 960                    if "_rec_spr.bmp" in f:
 961                        continue
 962                    picr = vtk.vtkBMPReader()
 963                    picr.SetFileName(f)
 964                    picr.Update()
 965                    mgf = vtk.vtkImageMagnitude()
 966                    mgf.SetInputData(picr.GetOutput())
 967                    mgf.Update()
 968                    ima.AddInputData(mgf.GetOutput())
 969                    pb.print("loading...")
 970                ima.Update()
 971                img = ima.GetOutput()
 972
 973            else:
 974
 975                if len(inputobj.shape) == 1:
 976                    varr = utils.numpy2vtk(inputobj)
 977                else:
 978                    varr = utils.numpy2vtk(inputobj.ravel(order="F"))
 979                varr.SetName("input_scalars")
 980
 981                img = vtk.vtkImageData()
 982                if dims is not None:
 983                    img.SetDimensions(dims[2],dims[1],dims[0])
 984                else:
 985                    if len(inputobj.shape)==1:
 986                        vedo.logger.error("must set dimensions (dims keyword) in Volume")
 987                        raise RuntimeError()
 988                    img.SetDimensions(inputobj.shape)
 989                img.GetPointData().AddArray(varr)
 990                img.GetPointData().SetActiveScalars(varr.GetName())
 991
 992        elif "ImageData" in inputtype:
 993            img = inputobj
 994
 995        elif isinstance(inputobj, Volume):
 996            img = inputobj.inputdata()
 997
 998        elif "UniformGrid" in inputtype:
 999            img = inputobj
1000
1001        elif hasattr(inputobj, "GetOutput"):  # passing vtk object, try to extract imagedata
1002            if hasattr(inputobj, "Update"):
1003                inputobj.Update()
1004            img = inputobj.GetOutput()
1005
1006        elif isinstance(inputobj, str):
1007            if "https://" in inputobj:
1008                inputobj = vedo.io.download(inputobj, verbose=False)
1009            img = vedo.io.loadImageData(inputobj)
1010
1011        else:
1012            vedo.logger.error(f"cannot understand input type {inputtype}")
1013            return
1014
1015        if dims is not None:
1016            img.SetDimensions(dims)
1017
1018        if origin is not None:
1019            img.SetOrigin(origin)  ### DIFFERENT from volume.origin()!
1020
1021        if spacing is not None:
1022            img.SetSpacing(spacing)
1023
1024        self._data = img
1025        self._mapper.SetInputData(img)
1026
1027        if img.GetPointData().GetScalars():
1028            if img.GetPointData().GetScalars().GetNumberOfComponents() == 1:
1029                self.mode(mode).color(c).alpha(alpha).alpha_gradient(alpha_gradient)
1030                self.GetProperty().SetShade(True)
1031                self.GetProperty().SetInterpolationType(1)
1032                self.GetProperty().SetScalarOpacityUnitDistance(alpha_unit)
1033
1034        # remember stuff:
1035        self._mode = mode
1036        self._color = c
1037        self._alpha = alpha
1038        self._alpha_grad = alpha_gradient
1039        self._alpha_unit = alpha_unit
1040        
1041        self.pipeline = utils.OperationNode("Volume",
1042            comment=f"dims={tuple(self.dimensions())}", c="#4cc9f0",
1043        )
1044        #######################################################################

This class can be initialized with a numpy object, a vtkImageData or a list of 2D bmp files.

Arguments:
  • c : (list, str) sets colors along the scalar range, or a matplotlib color map name
  • alpha : (float, list) sets transparencies along the scalar range
  • alpha_unit : (float) low values make composite rendering look brighter and denser
  • origin : (list) set volume origin coordinates
  • spacing : (list) voxel dimensions in x, y and z.
  • dims : (list) specify the dimensions of the volume.
  • mapper : (str) either 'gpu', 'opengl_gpu', 'fixed' or 'smart'
  • mode : (int) define the volumetric rendering style:

    • 0, composite rendering
    • 1, maximum projection
    • 2, minimum projection
    • 3, average projection
    • 4, additive mode


    The default mode is "composite" where the scalar values are sampled through the volume and composited in a front-to-back scheme through alpha blending. The final color and opacity is determined using the color and opacity transfer functions specified in alpha keyword.

    Maximum and minimum intensity blend modes use the maximum and minimum scalar values, respectively, along the sampling ray. The final color and opacity is determined by passing the resultant value through the color and opacity transfer functions.

    Additive blend mode accumulates scalar values by passing each value through the opacity transfer function and then adding up the product of the value and its opacity. In other words, the scalar values are scaled using the opacity transfer function and summed to derive the final color. Note that the resulting image is always grayscale i.e. aggregated values are not passed through the color transfer function. This is because the final value is a derived value and not a real data value along the sampling ray.

    Average intensity blend mode works similar to the additive blend mode where the scalar values are multiplied by opacity calculated from the opacity transfer function and then added. The additional step here is to divide the sum by the number of samples taken through the volume. As is the case with the additive intensity projection, the final image will always be grayscale i.e. the aggregated values are not passed through the color transfer function.

Example:
from vedo import Volume
vol = Volume("path/to/mydata/rec*.bmp")
vol.show()
Examples:
  • numpy2volume1.py (https://github.com/marcomusy/vedo/tree/master/examples/volumetric/numpy2volume1.py)
  • read_volume2.py (https://github.com/marcomusy/vedo/tree/master/examples/volumetric/read_volume2.py)

If a list of values is used for alpha, it is interpreted as a transfer function along the range of the scalar.
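
A sketch of building a Volume directly from a numpy array (a purely synthetic scalar field):

```python
import numpy as np
from vedo import Volume, show

# distance from the center of a 30x30x30 grid
X, Y, Z = np.mgrid[:30, :30, :30]
scalar_field = np.sqrt((X - 15)**2 + (Y - 15)**2 + (Z - 15)**2)

vol = Volume(scalar_field, spacing=(1, 1, 2), mode=1)   # anisotropic voxels, max projection
show(vol, axes=1).close()
```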

def mode(self, mode=None):
1055    def mode(self, mode=None):
1056        """
1057        Define the volumetric rendering mode following this:
1058            - 0, composite rendering
1059            - 1, maximum projection rendering
1060            - 2, minimum projection rendering
1061            - 3, average projection rendering
1062            - 4, additive mode
1063
1064        The default mode is "composite" where the scalar values are sampled through
1065        the volume and composited in a front-to-back scheme through alpha blending.
1066        The final color and opacity is determined using the color and opacity transfer
1067        functions specified in alpha keyword.
1068
1069        Maximum and minimum intensity blend modes use the maximum and minimum
1070        scalar values, respectively, along the sampling ray.
1071        The final color and opacity is determined by passing the resultant value
1072        through the color and opacity transfer functions.
1073
1074        Additive blend mode accumulates scalar values by passing each value
1075        through the opacity transfer function and then adding up the product
1076        of the value and its opacity. In other words, the scalar values are scaled
1077        using the opacity transfer function and summed to derive the final color.
1078        Note that the resulting image is always grayscale i.e. aggregated values
1079        are not passed through the color transfer function.
1080        This is because the final value is a derived value and not a real data value
1081        along the sampling ray.
1082
1083        Average intensity blend mode works similar to the additive blend mode where
1084        the scalar values are multiplied by opacity calculated from the opacity
1085        transfer function and then added.
1086        The additional step here is to divide the sum by the number of samples
1087        taken through the volume.
1088        As is the case with the additive intensity projection, the final image will
1089        always be grayscale i.e. the aggregated values are not passed through the
1090        color transfer function.
1091        """
1092        if mode is None:
1093            return self._mapper.GetBlendMode()
1094
1095        if isinstance(mode, str):
1096            if "comp" in mode:
1097                mode = 0
1098            elif "proj" in mode:
1099                if "max" in mode:
1100                    mode = 1
1101                elif "min" in mode:
1102                    mode = 2
1103                elif "ave" in mode:
1104                    mode = 3
1105                else:
1106                    vedo.logger.warning(f"unknown mode {mode}")
1107                    mode = 0
1108            elif "add" in mode:
1109                mode = 4
1110            else:
1111                vedo.logger.warning(f"unknown mode {mode}")
1112                mode = 0
1113
1114        self._mapper.SetBlendMode(mode)
1115        self._mode = mode
1116        return self
Define the volumetric rendering mode, choosing among the following options:
  • 0, composite rendering
  • 1, maximum projection rendering
  • 2, minimum projection rendering
  • 3, average projection rendering
  • 4, additive mode

The default mode is "composite", where the scalar values are sampled through the volume and composited in a front-to-back scheme through alpha blending. The final color and opacity are determined using the color and opacity transfer functions specified in the alpha keyword.

Maximum and minimum intensity blend modes use the maximum and minimum scalar values, respectively, along the sampling ray. The final color and opacity are determined by passing the resultant value through the color and opacity transfer functions.

Additive blend mode accumulates scalar values by passing each value through the opacity transfer function and then adding up the product of the value and its opacity. In other words, the scalar values are scaled using the opacity transfer function and summed to derive the final color. Note that the resulting image is always grayscale, i.e. the aggregated values are not passed through the color transfer function, because the final value is a derived value and not a real data value along the sampling ray.

Average intensity blend mode works similarly to the additive blend mode: the scalar values are multiplied by the opacity calculated from the opacity transfer function and then added. The additional step is to divide the sum by the number of samples taken through the volume. As with the additive blend mode, the final image will always be grayscale, i.e. the aggregated values are not passed through the color transfer function.
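A short usage sketch (the sample dataset is illustrative; the string form of the mode is matched by substring, as the source above shows):

```python
from vedo import Volume, dataurl

vol = Volume(dataurl + "embryo.slc")   # sample dataset from the vedo example data
vol.mode("max projection")             # equivalent to vol.mode(1)
print(vol.mode())                      # with no argument, the current blend mode is returned -> 1
vol.show(axes=1).close()
```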

def shade(self, status=None):
1118    def shade(self, status=None):
1119        """
1120        Set/Get the shading of a Volume.
1121        Shading can be further controlled with `volume.lighting()` method.
1122
1123        If shading is turned on, the mapper may perform shading calculations.
1124        In some cases shading does not apply
1125        (for example, in maximum intensity projection mode).
1126        """
1127        if status is None:
1128            return self.GetProperty().GetShade()
1129        self.GetProperty().SetShade(status)
1130        return self

Set/Get the shading of a Volume. Shading can be further controlled with the volume.lighting() method.

If shading is turned on, the mapper may perform shading calculations. In some cases shading does not apply (for example, in maximum intensity projection mode).
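For example, a minimal sketch (the random dataset is purely illustrative):

```python
import numpy as np
from vedo import Volume

vol = Volume(np.random.rand(40, 40, 40))  # illustrative scalar field in [0, 1]
vol.mode("composite")   # shading is meaningful in composite mode, not in max projection
vol.shade(True)         # ask the mapper to perform shading calculations
print(vol.shade())      # with no argument, the current status is returned -> 1
vol.show(axes=1).close()
```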

def cmap(self, c, alpha=None, vmin=None, vmax=None):
1132    def cmap(self, c, alpha=None, vmin=None, vmax=None):
1133        """Same as `color()`.
1134
1135        Arguments:
1136            alpha : (list)
1137                use a list to specify transparencies along the scalar range
1138            vmin : (float)
1139                force the min of the scalar range to be this value
1140            vmax : (float)
1141                force the max of the scalar range to be this value
1142        """
1143        return self.color(c, alpha, vmin, vmax)

Same as color().

Arguments:
  • alpha : (list) use a list to specify transparencies along the scalar range
  • vmin : (float) force the min of the scalar range to be this value
  • vmax : (float) force the max of the scalar range to be this value
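A hedged usage sketch (the data, colormap name and range values are illustrative):

```python
import numpy as np
from vedo import Volume

vol = Volume(np.random.rand(40, 40, 40))   # illustrative data in the range [0, 1]
vol.cmap("viridis",
         alpha=[0.0, 0.2, 0.6, 1.0],       # transparencies along the scalar range
         vmin=0.2, vmax=0.8)               # clamp the mapped scalar range
vol.show(axes=1).close()
```
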
def jittering(self, status=None):
1145    def jittering(self, status=None):
1146        """
1147        If `True`, each ray traversal direction will be perturbed slightly
1148        using a noise-texture to get rid of wood-grain effects.
1149        """
1150        if hasattr(self._mapper, "SetUseJittering"):  # tetmesh doesnt have it
1151            if status is None:
1152                return self._mapper.GetUseJittering()
1153            self._mapper.SetUseJittering(status)
1154        return self

If True, each ray traversal direction will be perturbed slightly using a noise texture to get rid of wood-grain effects.
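For example (the dataset is illustrative; a ray-casting mapper is needed for this option to have any effect):

```python
import numpy as np
from vedo import Volume

vol = Volume(np.random.rand(40, 40, 40), mapper="gpu")  # GPU ray-casting mapper
vol.jittering(True)      # perturb ray directions with a noise texture
print(vol.jittering())   # query the current setting -> 1
vol.show(axes=1).close()
```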

def mask(self, data):
1156    def mask(self, data):
1157        """
1158        Mask a volume visualization with a binary value.
1159        Needs to specify keyword mapper='gpu'.
1160
1161        Example:
1162        ```python
1163            from vedo import np, Volume, show
1164            data_matrix = np.zeros([75, 75, 75], dtype=np.uint8)
1165            # all voxels have value zero except:
1166            data_matrix[0:35,   0:35,  0:35] = 1
1167            data_matrix[35:55, 35:55, 35:55] = 2
1168            data_matrix[55:74, 55:74, 55:74] = 3
1169            vol = Volume(data_matrix, c=['white','b','g','r'], mapper='gpu')
1170            data_mask = np.zeros_like(data_matrix)
1171            data_mask[10:65, 10:45, 20:75] = 1
1172            vol.mask(data_mask)
1173            show(vol, axes=1).close()
1174        ```
1175        """
1176        mask = Volume(data.astype(np.uint8))
1177        try:
1178            self.mapper().SetMaskTypeToBinary()
1179            self.mapper().SetMaskInput(mask.inputdata())
1180        except AttributeError:
1181            vedo.logger.error("volume.mask() must create the volume with Volume(..., mapper='gpu')")
1182        return self

Mask a volume visualization with a binary mask. The volume must have been created with the keyword mapper='gpu'.

Example:

    from vedo import np, Volume, show
    data_matrix = np.zeros([75, 75, 75], dtype=np.uint8)
    # all voxels have value zero except:
    data_matrix[0:35,   0:35,  0:35] = 1
    data_matrix[35:55, 35:55, 35:55] = 2
    data_matrix[55:74, 55:74, 55:74] = 3
    vol = Volume(data_matrix, c=['white','b','g','r'], mapper='gpu')
    data_mask = np.zeros_like(data_matrix)
    data_mask[10:65, 10:45, 20:75] = 1