vedo.core
Base classes providing functionality to different vedo objects.
1#!/usr/bin/env python3 2# -*- coding: utf-8 -*- 3import numpy as np 4from typing import List, Union, Any 5from typing_extensions import Self 6 7import vedo.vtkclasses as vtki 8 9import vedo 10from vedo import colors 11from vedo import utils 12from vedo.transformations import LinearTransform, NonLinearTransform 13 14 15__docformat__ = "google" 16 17__doc__ = """ 18Base classes providing functionality to different vedo objects. 19 20![](https://vedo.embl.es/images/feats/algorithms_illustration.png) 21""" 22 23__all__ = [ 24 "DataArrayHelper", 25 "CommonAlgorithms", 26 "PointAlgorithms", 27 "VolumeAlgorithms", 28] 29 30warnings = dict( 31 points_getter=( 32 "WARNING: points() is deprecated, use vertices instead. Change:\n" 33 " mesh.points() -> mesh.vertices\n" 34 " (silence this with vedo.core.warnings['points_getter']=False)" 35 ), 36 points_setter=( 37 "WARNING: points() is deprecated, use vertices instead. Change:\n" 38 " mesh.points([[x,y,z], ...]) -> mesh.vertices = [[x,y,z], ...]\n" 39 " (silence this with vedo.core.warnings['points_getter']=False)" 40 ), 41) 42 43############################################################################### 44class DataArrayHelper: 45 # Internal use only. 46 # Helper class to manage data associated to either 47 # points (or vertices) and cells (or faces). 48 def __init__(self, obj, association): 49 50 self.obj = obj 51 self.association = association 52 53 def __getitem__(self, key): 54 55 if self.association == 0: 56 data = self.obj.dataset.GetPointData() 57 58 elif self.association == 1: 59 data = self.obj.dataset.GetCellData() 60 61 elif self.association == 2: 62 data = self.obj.dataset.GetFieldData() 63 64 varr = data.GetAbstractArray(key) 65 if isinstance(varr, vtki.vtkStringArray): 66 if isinstance(key, int): 67 key = data.GetArrayName(key) 68 n = varr.GetNumberOfValues() 69 narr = [varr.GetValue(i) for i in range(n)] 70 return narr 71 ########### 72 73 else: 74 raise RuntimeError() 75 76 if isinstance(key, int): 77 key = data.GetArrayName(key) 78 79 arr = data.GetArray(key) 80 if not arr: 81 return None 82 return utils.vtk2numpy(arr) 83 84 def __setitem__(self, key, input_array): 85 86 if self.association == 0: 87 data = self.obj.dataset.GetPointData() 88 n = self.obj.dataset.GetNumberOfPoints() 89 self.obj.mapper.SetScalarModeToUsePointData() 90 91 elif self.association == 1: 92 data = self.obj.dataset.GetCellData() 93 n = self.obj.dataset.GetNumberOfCells() 94 self.obj.mapper.SetScalarModeToUseCellData() 95 96 elif self.association == 2: 97 data = self.obj.dataset.GetFieldData() 98 if not utils.is_sequence(input_array): 99 input_array = [input_array] 100 101 if isinstance(input_array[0], str): 102 varr = vtki.vtkStringArray() 103 varr.SetName(key) 104 varr.SetNumberOfComponents(1) 105 varr.SetNumberOfTuples(len(input_array)) 106 for i, iarr in enumerate(input_array): 107 if isinstance(iarr, np.ndarray): 108 iarr = iarr.tolist() # better format 109 # Note: a string k can be converted to numpy with 110 # import json; k = np.array(json.loads(k)) 111 varr.InsertValue(i, str(iarr)) 112 else: 113 try: 114 varr = utils.numpy2vtk(input_array, name=key) 115 except TypeError as e: 116 vedo.logger.error( 117 f"cannot create metadata with input object:\n" 118 f"{input_array}" 119 f"\n\nAllowed content examples are:\n" 120 f"- flat list of strings ['a','b', 1, [1,2,3], ...]" 121 f" (first item must be a string in this case)\n" 122 f" hint: use k = np.array(json.loads(k)) to convert strings\n" 123 f"- numpy arrays of any shape" 124 ) 125 raise e 126 127 data.AddArray(varr) 128 return ############ 129 130 else: 131 raise RuntimeError() 132 133 if len(input_array) != n: 134 vedo.logger.error( 135 f"Error in point/cell data: length of input {len(input_array)}" 136 f" != {n} nr. of elements" 137 ) 138 raise RuntimeError() 139 140 input_array = np.asarray(input_array) 141 varr = utils.numpy2vtk(input_array, name=key) 142 data.AddArray(varr) 143 144 if len(input_array.shape) == 1: # scalars 145 data.SetActiveScalars(key) 146 try: # could be a volume mapper 147 self.obj.mapper.SetScalarRange(data.GetScalars().GetRange()) 148 except AttributeError: 149 pass 150 elif len(input_array.shape) == 2 and input_array.shape[1] == 3: # vectors 151 if key.lower() == "normals": 152 data.SetActiveNormals(key) 153 else: 154 data.SetActiveVectors(key) 155 156 def keys(self) -> List[str]: 157 """Return the list of available data array names""" 158 if self.association == 0: 159 data = self.obj.dataset.GetPointData() 160 elif self.association == 1: 161 data = self.obj.dataset.GetCellData() 162 elif self.association == 2: 163 data = self.obj.dataset.GetFieldData() 164 arrnames = [] 165 for i in range(data.GetNumberOfArrays()): 166 name = "" 167 if self.association == 2: 168 name = data.GetAbstractArray(i).GetName() 169 else: 170 iarr = data.GetArray(i) 171 if iarr: 172 name = iarr.GetName() 173 if name: 174 arrnames.append(name) 175 return arrnames 176 177 def items(self) -> List: 178 """Return the list of available data array `(names, values)`.""" 179 if self.association == 0: 180 data = self.obj.dataset.GetPointData() 181 elif self.association == 1: 182 data = self.obj.dataset.GetCellData() 183 elif self.association == 2: 184 data = self.obj.dataset.GetFieldData() 185 arrnames = [] 186 for i in range(data.GetNumberOfArrays()): 187 if self.association == 2: 188 name = data.GetAbstractArray(i).GetName() 189 else: 190 name = data.GetArray(i).GetName() 191 if name: 192 arrnames.append((name, self[name])) 193 return arrnames 194 195 def todict(self) -> dict: 196 """Return a dictionary of the available data arrays.""" 197 return dict(self.items()) 198 199 def rename(self, oldname: str, newname: str) -> None: 200 """Rename an array""" 201 if self.association == 0: 202 varr = self.obj.dataset.GetPointData().GetArray(oldname) 203 elif self.association == 1: 204 varr = self.obj.dataset.GetCellData().GetArray(oldname) 205 elif self.association == 2: 206 varr = self.obj.dataset.GetFieldData().GetAbstractArray(oldname) 207 if varr: 208 varr.SetName(newname) 209 else: 210 vedo.logger.warning( 211 f"Cannot rename non existing array {oldname} to {newname}" 212 ) 213 214 def remove(self, key: Union[int, str]) -> None: 215 """Remove a data array by name or number""" 216 if self.association == 0: 217 self.obj.dataset.GetPointData().RemoveArray(key) 218 elif self.association == 1: 219 self.obj.dataset.GetCellData().RemoveArray(key) 220 elif self.association == 2: 221 self.obj.dataset.GetFieldData().RemoveArray(key) 222 223 def clear(self) -> None: 224 """Remove all data associated to this object""" 225 if self.association == 0: 226 data = self.obj.dataset.GetPointData() 227 elif self.association == 1: 228 data = self.obj.dataset.GetCellData() 229 elif self.association == 2: 230 data = self.obj.dataset.GetFieldData() 231 for i in range(data.GetNumberOfArrays()): 232 if self.association == 2: 233 name = data.GetAbstractArray(i).GetName() 234 else: 235 name = data.GetArray(i).GetName() 236 data.RemoveArray(name) 237 238 def select(self, key: Union[int, str]) -> Any: 239 """Select one specific array by its name to make it the `active` one.""" 240 # Default (ColorModeToDefault): unsigned char scalars are treated as colors, 241 # and NOT mapped through the lookup table, while everything else is. 242 # ColorModeToDirectScalar extends ColorModeToDefault such that all integer 243 # types are treated as colors with values in the range 0-255 244 # and floating types are treated as colors with values in the range 0.0-1.0. 245 # Setting ColorModeToMapScalars means that all scalar data will be mapped 246 # through the lookup table. 247 # (Note that for multi-component scalars, the particular component 248 # to use for mapping can be specified using the SelectColorArray() method.) 249 if self.association == 0: 250 data = self.obj.dataset.GetPointData() 251 self.obj.mapper.SetScalarModeToUsePointData() 252 else: 253 data = self.obj.dataset.GetCellData() 254 self.obj.mapper.SetScalarModeToUseCellData() 255 256 if isinstance(key, int): 257 key = data.GetArrayName(key) 258 259 arr = data.GetArray(key) 260 if not arr: 261 return self.obj 262 263 nc = arr.GetNumberOfComponents() 264 # print("GetNumberOfComponents", nc) 265 if nc == 1: 266 data.SetActiveScalars(key) 267 elif nc == 2: 268 data.SetTCoords(arr) 269 elif nc in (3, 4): 270 if "rgb" in key.lower(): # type: ignore 271 data.SetActiveScalars(key) 272 try: 273 # could be a volume mapper 274 self.obj.mapper.SetColorModeToDirectScalars() 275 data.SetActiveVectors(None) # need this to fix bug in #1066 276 # print("SetColorModeToDirectScalars for", key) 277 except AttributeError: 278 pass 279 else: 280 data.SetActiveVectors(key) 281 elif nc == 9: 282 data.SetActiveTensors(key) 283 else: 284 vedo.logger.error(f"Cannot select array {key} with {nc} components") 285 return self.obj 286 287 try: 288 # could be a volume mapper 289 self.obj.mapper.SetArrayName(key) 290 self.obj.mapper.ScalarVisibilityOn() 291 except AttributeError: 292 pass 293 294 return self.obj 295 296 def select_texture_coords(self, key: Union[int,str]) -> Any: 297 """Select one specific array to be used as texture coordinates.""" 298 if self.association == 0: 299 data = self.obj.dataset.GetPointData() 300 else: 301 vedo.logger.warning("texture coordinates are only available for point data") 302 return 303 304 if isinstance(key, int): 305 key = data.GetArrayName(key) 306 data.SetTCoords(data.GetArray(key)) 307 return self.obj 308 309 def select_normals(self, key: Union[int,str]) -> Any: 310 """Select one specific normal array by its name to make it the "active" one.""" 311 if self.association == 0: 312 data = self.obj.dataset.GetPointData() 313 self.obj.mapper.SetScalarModeToUsePointData() 314 else: 315 data = self.obj.dataset.GetCellData() 316 self.obj.mapper.SetScalarModeToUseCellData() 317 318 if isinstance(key, int): 319 key = data.GetArrayName(key) 320 data.SetActiveNormals(key) 321 return self.obj 322 323 def print(self, **kwargs) -> None: 324 """Print the array names available to terminal""" 325 colors.printc(self.keys(), **kwargs) 326 327 def __repr__(self) -> str: 328 """Representation""" 329 330 def _get_str(pd, header): 331 out = f"\x1b[2m\x1b[1m\x1b[7m{header}" 332 if pd.GetNumberOfArrays(): 333 if self.obj.name: 334 out += f" in {self.obj.name}" 335 out += f" contains {pd.GetNumberOfArrays()} array(s)\x1b[0m" 336 for i in range(pd.GetNumberOfArrays()): 337 varr = pd.GetArray(i) 338 out += f"\n\x1b[1m\x1b[4mArray name : {varr.GetName()}\x1b[0m" 339 out += "\nindex".ljust(15) + f": {i}" 340 t = varr.GetDataType() 341 if t in vtki.array_types: 342 out += "\ntype".ljust(15) 343 out += f": {vtki.array_types[t]}" 344 shape = (varr.GetNumberOfTuples(), varr.GetNumberOfComponents()) 345 out += "\nshape".ljust(15) + f": {shape}" 346 out += "\nrange".ljust(15) + f": {np.array(varr.GetRange())}" 347 out += "\nmax id".ljust(15) + f": {varr.GetMaxId()}" 348 out += "\nlook up table".ljust(15) + f": {bool(varr.GetLookupTable())}" 349 out += "\nin-memory size".ljust(15) + f": {varr.GetActualMemorySize()} KB" 350 else: 351 out += " is empty.\x1b[0m" 352 return out 353 354 if self.association == 0: 355 out = _get_str(self.obj.dataset.GetPointData(), "Point Data") 356 elif self.association == 1: 357 out = _get_str(self.obj.dataset.GetCellData(), "Cell Data") 358 elif self.association == 2: 359 pd = self.obj.dataset.GetFieldData() 360 if pd.GetNumberOfArrays(): 361 out = "\x1b[2m\x1b[1m\x1b[7mMeta Data" 362 if self.obj.name: 363 out += f" in {self.obj.name}" 364 out += f" contains {pd.GetNumberOfArrays()} entries\x1b[0m" 365 for i in range(pd.GetNumberOfArrays()): 366 varr = pd.GetAbstractArray(i) 367 out += f"\n\x1b[1m\x1b[4mEntry name : {varr.GetName()}\x1b[0m" 368 out += "\nindex".ljust(15) + f": {i}" 369 shape = (varr.GetNumberOfTuples(), varr.GetNumberOfComponents()) 370 out += "\nshape".ljust(15) + f": {shape}" 371 372 return out 373 374 375############################################################################### 376class CommonAlgorithms: 377 """Common algorithms.""" 378 379 @property 380 def pointdata(self): 381 """ 382 Create and/or return a `numpy.array` associated to points (vertices). 383 A data array can be indexed either as a string or by an integer number. 384 E.g.: `myobj.pointdata["arrayname"]` 385 386 Usage: 387 388 `myobj.pointdata.keys()` to return the available data array names 389 390 `myobj.pointdata.select(name)` to make this array the active one 391 392 `myobj.pointdata.remove(name)` to remove this array 393 """ 394 return DataArrayHelper(self, 0) 395 396 @property 397 def celldata(self): 398 """ 399 Create and/or return a `numpy.array` associated to cells (faces). 400 A data array can be indexed either as a string or by an integer number. 401 E.g.: `myobj.celldata["arrayname"]` 402 403 Usage: 404 405 `myobj.celldata.keys()` to return the available data array names 406 407 `myobj.celldata.select(name)` to make this array the active one 408 409 `myobj.celldata.remove(name)` to remove this array 410 """ 411 return DataArrayHelper(self, 1) 412 413 @property 414 def metadata(self): 415 """ 416 Create and/or return a `numpy.array` associated to neither cells nor faces. 417 A data array can be indexed either as a string or by an integer number. 418 E.g.: `myobj.metadata["arrayname"]` 419 420 Usage: 421 422 `myobj.metadata.keys()` to return the available data array names 423 424 `myobj.metadata.select(name)` to make this array the active one 425 426 `myobj.metadata.remove(name)` to remove this array 427 """ 428 return DataArrayHelper(self, 2) 429 430 def memory_address(self) -> int: 431 """ 432 Return a unique memory address integer which may serve as the ID of the 433 object, or passed to c++ code. 434 """ 435 # https://www.linkedin.com/pulse/speedup-your-code-accessing-python-vtk-objects-from-c-pletzer/ 436 # https://github.com/tfmoraes/polydata_connectivity 437 return int(self.dataset.GetAddressAsString("")[5:], 16) 438 439 def memory_size(self) -> int: 440 """Return the size in bytes of the object in memory.""" 441 return self.dataset.GetActualMemorySize() 442 443 def modified(self) -> Self: 444 """Use in conjunction with `tonumpy()` to update any modifications to the image array.""" 445 self.dataset.GetPointData().Modified() 446 scals = self.dataset.GetPointData().GetScalars() 447 if scals: 448 scals.Modified() 449 return self 450 451 def box(self, scale=1, padding=0) -> "vedo.Mesh": 452 """ 453 Return the bounding box as a new `Mesh` object. 454 455 Arguments: 456 scale : (float) 457 box size can be scaled by a factor 458 padding : (float, list) 459 a constant padding can be added (can be a list `[padx,pady,padz]`) 460 """ 461 b = self.bounds() 462 if not utils.is_sequence(padding): 463 padding = [padding, padding, padding] 464 length, width, height = b[1] - b[0], b[3] - b[2], b[5] - b[4] 465 tol = (length + width + height) / 30000 # useful for boxing text 466 pos = [(b[0] + b[1]) / 2, (b[3] + b[2]) / 2, (b[5] + b[4]) / 2 - tol] 467 bx = vedo.shapes.Box( 468 pos, 469 length * scale + padding[0], 470 width * scale + padding[1], 471 height * scale + padding[2], 472 c="gray", 473 ) 474 try: 475 pr = vtki.vtkProperty() 476 pr.DeepCopy(self.properties) 477 bx.actor.SetProperty(pr) 478 bx.properties = pr 479 except (AttributeError, TypeError): 480 pass 481 bx.flat().lighting("off").wireframe(True) 482 return bx 483 484 def update_dataset(self, dataset, **kwargs) -> Self: 485 """Update the dataset of the object with the provided VTK dataset.""" 486 self._update(dataset, **kwargs) 487 return self 488 489 def bounds(self) -> np.ndarray: 490 """ 491 Get the object bounds. 492 Returns a list in format `[xmin,xmax, ymin,ymax, zmin,zmax]`. 493 """ 494 try: # this is very slow for large meshes 495 pts = self.vertices 496 xmin, ymin, zmin = np.min(pts, axis=0) 497 xmax, ymax, zmax = np.max(pts, axis=0) 498 return np.array([xmin, xmax, ymin, ymax, zmin, zmax]) 499 except (AttributeError, ValueError): 500 return np.array(self.dataset.GetBounds()) 501 502 def xbounds(self, i=None) -> np.ndarray: 503 """Get the bounds `[xmin,xmax]`. Can specify upper or lower with i (0,1).""" 504 b = self.bounds() 505 if i is not None: 506 return b[i] 507 return np.array([b[0], b[1]]) 508 509 def ybounds(self, i=None) -> np.ndarray: 510 """Get the bounds `[ymin,ymax]`. Can specify upper or lower with i (0,1).""" 511 b = self.bounds() 512 if i == 0: 513 return b[2] 514 if i == 1: 515 return b[3] 516 return np.array([b[2], b[3]]) 517 518 def zbounds(self, i=None) -> np.ndarray: 519 """Get the bounds `[zmin,zmax]`. Can specify upper or lower with i (0,1).""" 520 b = self.bounds() 521 if i == 0: 522 return b[4] 523 if i == 1: 524 return b[5] 525 return np.array([b[4], b[5]]) 526 527 def diagonal_size(self) -> float: 528 """Get the length of the diagonal of the bounding box.""" 529 b = self.bounds() 530 return np.sqrt((b[1] - b[0])**2 + (b[3] - b[2])**2 + (b[5] - b[4])**2) 531 532 def average_size(self) -> float: 533 """ 534 Calculate and return the average size of the object. 535 This is the mean of the vertex distances from the center of mass. 536 """ 537 coords = self.vertices 538 cm = np.mean(coords, axis=0) 539 if coords.shape[0] == 0: 540 return 0.0 541 cc = coords - cm 542 return np.mean(np.linalg.norm(cc, axis=1)) 543 544 def center_of_mass(self) -> np.ndarray: 545 """Get the center of mass of the object.""" 546 if isinstance(self, (vedo.RectilinearGrid, vedo.Volume)): 547 return np.array(self.dataset.GetCenter()) 548 cmf = vtki.new("CenterOfMass") 549 cmf.SetInputData(self.dataset) 550 cmf.Update() 551 c = cmf.GetCenter() 552 return np.array(c) 553 554 def copy_data_from(self, obj: Any) -> Self: 555 """Copy all data (point and cell data) from this input object""" 556 self.dataset.GetPointData().PassData(obj.dataset.GetPointData()) 557 self.dataset.GetCellData().PassData(obj.dataset.GetCellData()) 558 self.pipeline = utils.OperationNode( 559 "copy_data_from", 560 parents=[self, obj], 561 comment=f"{obj.__class__.__name__}", 562 shape="note", 563 c="#ccc5b9", 564 ) 565 return self 566 567 def inputdata(self): 568 """Obsolete, use `.dataset` instead.""" 569 colors.printc("WARNING: 'inputdata()' is obsolete, use '.dataset' instead.", c="y") 570 return self.dataset 571 572 @property 573 def npoints(self): 574 """Retrieve the number of points (or vertices).""" 575 return self.dataset.GetNumberOfPoints() 576 577 @property 578 def nvertices(self): 579 """Retrieve the number of vertices (or points).""" 580 return self.dataset.GetNumberOfPoints() 581 582 @property 583 def ncells(self): 584 """Retrieve the number of cells.""" 585 return self.dataset.GetNumberOfCells() 586 587 def points(self, pts=None): 588 """Obsolete, use `self.vertices` or `self.coordinates` instead.""" 589 if pts is None: ### getter 590 591 if warnings["points_getter"]: 592 colors.printc(warnings["points_getter"], c="y") 593 warnings["points_getter"] = "" 594 return self.vertices 595 596 else: ### setter 597 598 if warnings["points_setter"]: 599 colors.printc(warnings["points_setter"], c="y") 600 warnings["points_setter"] = "" 601 602 pts = np.asarray(pts, dtype=np.float32) 603 604 if pts.ndim == 1: 605 ### getter by point index ################### 606 indices = pts.astype(int) 607 vpts = self.dataset.GetPoints() 608 arr = utils.vtk2numpy(vpts.GetData()) 609 return arr[indices] ########### 610 611 ### setter #################################### 612 if pts.shape[1] == 2: 613 pts = np.c_[pts, np.zeros(pts.shape[0], dtype=np.float32)] 614 arr = utils.numpy2vtk(pts, dtype=np.float32) 615 616 vpts = self.dataset.GetPoints() 617 vpts.SetData(arr) 618 vpts.Modified() 619 # reset mesh to identity matrix position/rotation: 620 self.point_locator = None 621 self.cell_locator = None 622 self.line_locator = None 623 self.transform = LinearTransform() 624 return self 625 626 @property 627 def cell_centers(self): 628 """ 629 Get the coordinates of the cell centers. 630 631 Examples: 632 - [delaunay2d.py](https://github.com/marcomusy/vedo/tree/master/examples/basic/delaunay2d.py) 633 634 See also: `CellCenters()`. 635 """ 636 vcen = vtki.new("CellCenters") 637 vcen.CopyArraysOff() 638 vcen.SetInputData(self.dataset) 639 vcen.Update() 640 return utils.vtk2numpy(vcen.GetOutput().GetPoints().GetData()) 641 642 @property 643 def lines(self): 644 """ 645 Get lines connectivity ids as a python array 646 formatted as `[[id0,id1], [id3,id4], ...]` 647 648 See also: `lines_as_flat_array()`. 649 """ 650 # Get cell connettivity ids as a 1D array. The vtk format is: 651 # [nids1, id0 ... idn, niids2, id0 ... idm, etc]. 652 try: 653 arr1d = utils.vtk2numpy(self.dataset.GetLines().GetData()) 654 except AttributeError: 655 return np.array([]) 656 i = 0 657 conn = [] 658 n = len(arr1d) 659 for _ in range(n): 660 cell = [arr1d[i + k + 1] for k in range(arr1d[i])] 661 conn.append(cell) 662 i += arr1d[i] + 1 663 if i >= n: 664 break 665 666 return conn # cannot always make a numpy array of it! 667 668 @property 669 def lines_as_flat_array(self): 670 """ 671 Get lines connectivity ids as a 1D numpy array. 672 Format is e.g. [2, 10,20, 3, 10,11,12, 2, 70,80, ...] 673 674 See also: `lines()`. 675 """ 676 try: 677 return utils.vtk2numpy(self.dataset.GetLines().GetData()) 678 except AttributeError: 679 return np.array([]) 680 681 def mark_boundaries(self) -> Self: 682 """ 683 Mark cells and vertices if they lie on a boundary. 684 A new array called `BoundaryCells` is added to the object. 685 """ 686 mb = vtki.new("MarkBoundaryFilter") 687 mb.SetInputData(self.dataset) 688 mb.Update() 689 self.dataset.DeepCopy(mb.GetOutput()) 690 self.pipeline = utils.OperationNode("mark_boundaries", parents=[self]) 691 return self 692 693 def find_cells_in_bounds(self, xbounds=(), ybounds=(), zbounds=()) -> np.ndarray: 694 """ 695 Find cells that are within the specified bounds. 696 """ 697 try: 698 xbounds = list(xbounds.bounds()) 699 except AttributeError: 700 pass 701 702 if len(xbounds) == 6: 703 bnds = xbounds 704 else: 705 bnds = list(self.bounds()) 706 if len(xbounds) == 2: 707 bnds[0] = xbounds[0] 708 bnds[1] = xbounds[1] 709 if len(ybounds) == 2: 710 bnds[2] = ybounds[0] 711 bnds[3] = ybounds[1] 712 if len(zbounds) == 2: 713 bnds[4] = zbounds[0] 714 bnds[5] = zbounds[1] 715 716 cell_ids = vtki.vtkIdList() 717 if not self.cell_locator: 718 self.cell_locator = vtki.new("CellTreeLocator") 719 self.cell_locator.SetDataSet(self.dataset) 720 self.cell_locator.BuildLocator() 721 self.cell_locator.FindCellsWithinBounds(bnds, cell_ids) 722 cids = [] 723 for i in range(cell_ids.GetNumberOfIds()): 724 cid = cell_ids.GetId(i) 725 cids.append(cid) 726 return np.array(cids) 727 728 def find_cells_along_line(self, p0, p1, tol=0.001) -> np.ndarray: 729 """ 730 Find cells that are intersected by a line segment. 731 """ 732 cell_ids = vtki.vtkIdList() 733 if not self.cell_locator: 734 self.cell_locator = vtki.new("CellTreeLocator") 735 self.cell_locator.SetDataSet(self.dataset) 736 self.cell_locator.BuildLocator() 737 self.cell_locator.FindCellsAlongLine(p0, p1, tol, cell_ids) 738 cids = [] 739 for i in range(cell_ids.GetNumberOfIds()): 740 cid = cell_ids.GetId(i) 741 cids.append(cid) 742 return np.array(cids) 743 744 def find_cells_along_plane(self, origin, normal, tol=0.001) -> np.ndarray: 745 """ 746 Find cells that are intersected by a plane. 747 """ 748 cell_ids = vtki.vtkIdList() 749 if not self.cell_locator: 750 self.cell_locator = vtki.new("CellTreeLocator") 751 self.cell_locator.SetDataSet(self.dataset) 752 self.cell_locator.BuildLocator() 753 self.cell_locator.FindCellsAlongPlane(origin, normal, tol, cell_ids) 754 cids = [] 755 for i in range(cell_ids.GetNumberOfIds()): 756 cid = cell_ids.GetId(i) 757 cids.append(cid) 758 return np.array(cids) 759 760 def keep_cell_types(self, types=()): 761 """ 762 Extract cells of a specific type. 763 764 Check the VTK cell types here: 765 https://vtk.org/doc/nightly/html/vtkCellType_8h.html 766 """ 767 fe = vtki.new("ExtractCellsByType") 768 fe.SetInputData(self.dataset) 769 for t in types: 770 try: 771 if utils.is_integer(t): 772 it = t 773 else: 774 it = vtki.cell_types[t.upper()] 775 except KeyError: 776 vedo.logger.error(f"Cell type '{t}' not recognized") 777 continue 778 fe.AddCellType(it) 779 fe.Update() 780 self._update(fe.GetOutput()) 781 return self 782 783 def map_cells_to_points(self, arrays=(), move=False) -> Self: 784 """ 785 Interpolate cell data (i.e., data specified per cell or face) 786 into point data (i.e., data specified at each vertex). 787 The method of transformation is based on averaging the data values 788 of all cells using a particular point. 789 790 A custom list of arrays to be mapped can be passed in input. 791 792 Set `move=True` to delete the original `celldata` array. 793 """ 794 c2p = vtki.new("CellDataToPointData") 795 c2p.SetInputData(self.dataset) 796 if not move: 797 c2p.PassCellDataOn() 798 if arrays: 799 c2p.ClearCellDataArrays() 800 c2p.ProcessAllArraysOff() 801 for arr in arrays: 802 c2p.AddCellDataArray(arr) 803 else: 804 c2p.ProcessAllArraysOn() 805 c2p.Update() 806 self._update(c2p.GetOutput(), reset_locators=False) 807 self.mapper.SetScalarModeToUsePointData() 808 self.pipeline = utils.OperationNode("map_cells_to_points", parents=[self]) 809 return self 810 811 @property 812 def vertices(self): 813 """Return the vertices (points) coordinates.""" 814 try: 815 # for polydata and unstructured grid 816 varr = self.dataset.GetPoints().GetData() 817 except (AttributeError, TypeError): 818 try: 819 # for RectilinearGrid, StructuredGrid 820 vpts = vtki.vtkPoints() 821 self.dataset.GetPoints(vpts) 822 varr = vpts.GetData() 823 except (AttributeError, TypeError): 824 try: 825 # for ImageData 826 v2p = vtki.new("ImageToPoints") 827 v2p.SetInputData(self.dataset) 828 v2p.Update() 829 varr = v2p.GetOutput().GetPoints().GetData() 830 except AttributeError: 831 return np.array([]) 832 833 return utils.vtk2numpy(varr) 834 835 # setter 836 @vertices.setter 837 def vertices(self, pts): 838 """Set vertices (points) coordinates.""" 839 pts = utils.make3d(pts) 840 arr = utils.numpy2vtk(pts, dtype=np.float32) 841 try: 842 vpts = self.dataset.GetPoints() 843 vpts.SetData(arr) 844 vpts.Modified() 845 except (AttributeError, TypeError): 846 vedo.logger.error(f"Cannot set vertices for {type(self)}") 847 return self 848 # reset mesh to identity matrix position/rotation: 849 self.point_locator = None 850 self.cell_locator = None 851 self.line_locator = None 852 self.transform = LinearTransform() 853 854 @property 855 def coordinates(self): 856 """Return the vertices (points) coordinates. Same as `vertices`.""" 857 return self.vertices 858 859 @coordinates.setter 860 def coordinates(self, pts): 861 """Set vertices (points) coordinates. Same as `vertices`.""" 862 self.vertices = pts 863 864 @property 865 def cells_as_flat_array(self): 866 """ 867 Get cell connectivity ids as a 1D numpy array. 868 Format is e.g. [3, 10,20,30 4, 10,11,12,13 ...] 869 """ 870 try: 871 # valid for unstructured grid 872 arr1d = utils.vtk2numpy(self.dataset.GetCells().GetData()) 873 except AttributeError: 874 # valid for polydata 875 arr1d = utils.vtk2numpy(self.dataset.GetPolys().GetData()) 876 return arr1d 877 878 @property 879 def cells(self): 880 """ 881 Get the cells connectivity ids as a numpy array. 882 883 The output format is: `[[id0 ... idn], [id0 ... idm], etc]`. 884 """ 885 try: 886 # valid for unstructured grid 887 arr1d = utils.vtk2numpy(self.dataset.GetCells().GetData()) 888 except AttributeError: 889 try: 890 # valid for polydata 891 arr1d = utils.vtk2numpy(self.dataset.GetPolys().GetData()) 892 except AttributeError: 893 vedo.logger.warning(f"Cannot get cells for {type(self)}") 894 return np.array([]) 895 896 # Get cell connettivity ids as a 1D array. vtk format is: 897 # [nids1, id0 ... idn, niids2, id0 ... idm, etc]. 898 i = 0 899 conn = [] 900 n = len(arr1d) 901 if n: 902 while True: 903 cell = [arr1d[i + k] for k in range(1, arr1d[i] + 1)] 904 conn.append(cell) 905 i += arr1d[i] + 1 906 if i >= n: 907 break 908 return conn 909 910 def map_points_to_cells(self, arrays=(), move=False) -> Self: 911 """ 912 Interpolate point data (i.e., data specified per point or vertex) 913 into cell data (i.e., data specified per cell). 914 The method of transformation is based on averaging the data values 915 of all points defining a particular cell. 916 917 A custom list of arrays to be mapped can be passed in input. 918 919 Set `move=True` to delete the original `pointdata` array. 920 921 Examples: 922 - [mesh_map2cell.py](https://github.com/marcomusy/vedo/tree/master/examples/basic/mesh_map2cell.py) 923 """ 924 p2c = vtki.new("PointDataToCellData") 925 p2c.SetInputData(self.dataset) 926 if not move: 927 p2c.PassPointDataOn() 928 if arrays: 929 p2c.ClearPointDataArrays() 930 p2c.ProcessAllArraysOff() 931 for arr in arrays: 932 p2c.AddPointDataArray(arr) 933 else: 934 p2c.ProcessAllArraysOn() 935 p2c.Update() 936 self._update(p2c.GetOutput(), reset_locators=False) 937 self.mapper.SetScalarModeToUseCellData() 938 self.pipeline = utils.OperationNode("map_points_to_cells", parents=[self]) 939 return self 940 941 def resample_data_from(self, source, tol=None, categorical=False) -> Self: 942 """ 943 Resample point and cell data from another dataset. 944 The output has the same structure but its point data have 945 the resampled values from target. 946 947 Use `tol` to set the tolerance used to compute whether 948 a point in the source is in a cell of the current object. 949 Points without resampled values, and their cells, are marked as blank. 950 If the data is categorical, then the resulting data will be determined 951 by a nearest neighbor interpolation scheme. 952 953 Example: 954 ```python 955 from vedo import * 956 m1 = Mesh(dataurl+'bunny.obj')#.add_gaussian_noise(0.1) 957 pts = m1.vertices 958 ces = m1.cell_centers 959 m1.pointdata["xvalues"] = np.power(pts[:,0], 3) 960 m1.celldata["yvalues"] = np.power(ces[:,1], 3) 961 m2 = Mesh(dataurl+'bunny.obj') 962 m2.resample_data_from(m1) 963 # print(m2.pointdata["xvalues"]) 964 show(m1, m2 , N=2, axes=1) 965 ``` 966 """ 967 rs = vtki.new("ResampleWithDataSet") 968 rs.SetInputData(self.dataset) 969 rs.SetSourceData(source.dataset) 970 971 rs.SetPassPointArrays(True) 972 rs.SetPassCellArrays(True) 973 rs.SetPassFieldArrays(True) 974 rs.SetCategoricalData(categorical) 975 976 rs.SetComputeTolerance(True) 977 if tol: 978 rs.SetComputeTolerance(False) 979 rs.SetTolerance(tol) 980 rs.Update() 981 self._update(rs.GetOutput(), reset_locators=False) 982 self.pipeline = utils.OperationNode( 983 "resample_data_from", 984 comment=f"{source.__class__.__name__}", 985 parents=[self, source], 986 ) 987 return self 988 989 def interpolate_data_from( 990 self, 991 source, 992 radius=None, 993 n=None, 994 kernel="shepard", 995 exclude=("Normals",), 996 on="points", 997 null_strategy=1, 998 null_value=0, 999 ) -> Self: 1000 """ 1001 Interpolate over source to port its data onto the current object using various kernels. 1002 1003 If n (number of closest points to use) is set then radius value is ignored. 1004 1005 Check out also: 1006 `probe()` which in many cases can be faster. 1007 1008 Arguments: 1009 kernel : (str) 1010 available kernels are [shepard, gaussian, linear] 1011 null_strategy : (int) 1012 specify a strategy to use when encountering a "null" point 1013 during the interpolation process. Null points occur when the local neighborhood 1014 (of nearby points to interpolate from) is empty. 1015 1016 - Case 0: an output array is created that marks points 1017 as being valid (=1) or null (invalid =0), and the null_value is set as well 1018 - Case 1: the output data value(s) are set to the provided null_value 1019 - Case 2: simply use the closest point to perform the interpolation. 1020 null_value : (float) 1021 see above. 1022 1023 Examples: 1024 - [interpolate_scalar1.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/interpolate_scalar1.py) 1025 - [interpolate_scalar3.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/interpolate_scalar3.py) 1026 - [interpolate_scalar4.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/interpolate_scalar4.py) 1027 - [image_probe.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/image_probe.py) 1028 1029 ![](https://vedo.embl.es/images/advanced/interpolateMeshArray.png) 1030 """ 1031 if radius is None and not n: 1032 vedo.logger.error("in interpolate_data_from(): please set either radius or n") 1033 raise RuntimeError 1034 1035 if on == "points": 1036 points = source.dataset 1037 elif on == "cells": 1038 c2p = vtki.new("CellDataToPointData") 1039 c2p.SetInputData(source.dataset) 1040 c2p.Update() 1041 points = c2p.GetOutput() 1042 else: 1043 vedo.logger.error("in interpolate_data_from(), on must be on points or cells") 1044 raise RuntimeError() 1045 1046 locator = vtki.new("PointLocator") 1047 locator.SetDataSet(points) 1048 locator.BuildLocator() 1049 1050 if kernel.lower() == "shepard": 1051 kern = vtki.new("ShepardKernel") 1052 kern.SetPowerParameter(2) 1053 elif kernel.lower() == "gaussian": 1054 kern = vtki.new("GaussianKernel") 1055 kern.SetSharpness(2) 1056 elif kernel.lower() == "linear": 1057 kern = vtki.new("LinearKernel") 1058 else: 1059 vedo.logger.error("available kernels are: [shepard, gaussian, linear]") 1060 raise RuntimeError() 1061 1062 if n: 1063 kern.SetNumberOfPoints(n) 1064 kern.SetKernelFootprintToNClosest() 1065 else: 1066 kern.SetRadius(radius) 1067 kern.SetKernelFootprintToRadius() 1068 1069 interpolator = vtki.new("PointInterpolator") 1070 interpolator.SetInputData(self.dataset) 1071 interpolator.SetSourceData(points) 1072 interpolator.SetKernel(kern) 1073 interpolator.SetLocator(locator) 1074 interpolator.PassFieldArraysOn() 1075 interpolator.SetNullPointsStrategy(null_strategy) 1076 interpolator.SetNullValue(null_value) 1077 interpolator.SetValidPointsMaskArrayName("ValidPointMask") 1078 for ex in exclude: 1079 interpolator.AddExcludedArray(ex) 1080 1081 # remove arrays that are already present in the source 1082 # this is because the interpolator will ignore them otherwise 1083 for i in range(self.dataset.GetPointData().GetNumberOfArrays()): 1084 name = self.dataset.GetPointData().GetArrayName(i) 1085 if name not in exclude: 1086 self.dataset.GetPointData().RemoveArray(name) 1087 1088 interpolator.Update() 1089 1090 if on == "cells": 1091 p2c = vtki.new("PointDataToCellData") 1092 p2c.SetInputData(interpolator.GetOutput()) 1093 p2c.Update() 1094 cpoly = p2c.GetOutput() 1095 else: 1096 cpoly = interpolator.GetOutput() 1097 1098 self._update(cpoly, reset_locators=False) 1099 1100 self.pipeline = utils.OperationNode("interpolate_data_from", parents=[self, source]) 1101 return self 1102 1103 def add_ids(self) -> Self: 1104 """ 1105 Generate point and cell ids arrays. 1106 1107 Two new arrays are added to the mesh: `PointID` and `CellID`. 1108 """ 1109 ids = vtki.new("IdFilter") 1110 ids.SetInputData(self.dataset) 1111 ids.PointIdsOn() 1112 ids.CellIdsOn() 1113 ids.FieldDataOff() 1114 ids.SetPointIdsArrayName("PointID") 1115 ids.SetCellIdsArrayName("CellID") 1116 ids.Update() 1117 self._update(ids.GetOutput(), reset_locators=False) 1118 self.pipeline = utils.OperationNode("add_ids", parents=[self]) 1119 return self 1120 1121 def gradient(self, input_array=None, on="points", fast=False) -> np.ndarray: 1122 """ 1123 Compute and return the gradiend of the active scalar field as a numpy array. 1124 1125 Arguments: 1126 input_array : (str) 1127 array of the scalars to compute the gradient, 1128 if None the current active array is selected 1129 on : (str) 1130 compute either on 'points' or 'cells' data 1131 fast : (bool) 1132 if True, will use a less accurate algorithm 1133 that performs fewer derivative calculations (and is therefore faster). 1134 1135 Examples: 1136 - [isolines.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/isolines.py) 1137 1138 ![](https://user-images.githubusercontent.com/32848391/72433087-f00a8780-3798-11ea-9778-991f0abeca70.png) 1139 """ 1140 gra = vtki.new("GradientFilter") 1141 if on.startswith("p"): 1142 varr = self.dataset.GetPointData() 1143 tp = vtki.vtkDataObject.FIELD_ASSOCIATION_POINTS 1144 elif on.startswith("c"): 1145 varr = self.dataset.GetCellData() 1146 tp = vtki.vtkDataObject.FIELD_ASSOCIATION_CELLS 1147 else: 1148 vedo.logger.error(f"in gradient: unknown option {on}") 1149 raise RuntimeError 1150 1151 if input_array is None: 1152 if varr.GetScalars(): 1153 input_array = varr.GetScalars().GetName() 1154 else: 1155 vedo.logger.error(f"in gradient: no scalars found for {on}") 1156 raise RuntimeError 1157 1158 gra.SetInputData(self.dataset) 1159 gra.SetInputScalars(tp, input_array) 1160 gra.SetResultArrayName("Gradient") 1161 gra.SetFasterApproximation(fast) 1162 gra.ComputeDivergenceOff() 1163 gra.ComputeVorticityOff() 1164 gra.ComputeGradientOn() 1165 gra.Update() 1166 if on.startswith("p"): 1167 gvecs = utils.vtk2numpy(gra.GetOutput().GetPointData().GetArray("Gradient")) 1168 else: 1169 gvecs = utils.vtk2numpy(gra.GetOutput().GetCellData().GetArray("Gradient")) 1170 return gvecs 1171 1172 def divergence(self, array_name=None, on="points", fast=False) -> np.ndarray: 1173 """ 1174 Compute and return the divergence of a vector field as a numpy array. 1175 1176 Arguments: 1177 array_name : (str) 1178 name of the array of vectors to compute the divergence, 1179 if None the current active array is selected 1180 on : (str) 1181 compute either on 'points' or 'cells' data 1182 fast : (bool) 1183 if True, will use a less accurate algorithm 1184 that performs fewer derivative calculations (and is therefore faster). 1185 """ 1186 div = vtki.new("GradientFilter") 1187 if on.startswith("p"): 1188 varr = self.dataset.GetPointData() 1189 tp = vtki.vtkDataObject.FIELD_ASSOCIATION_POINTS 1190 elif on.startswith("c"): 1191 varr = self.dataset.GetCellData() 1192 tp = vtki.vtkDataObject.FIELD_ASSOCIATION_CELLS 1193 else: 1194 vedo.logger.error(f"in divergence(): unknown option {on}") 1195 raise RuntimeError 1196 1197 if array_name is None: 1198 if varr.GetVectors(): 1199 array_name = varr.GetVectors().GetName() 1200 else: 1201 vedo.logger.error(f"in divergence(): no vectors found for {on}") 1202 raise RuntimeError 1203 1204 div.SetInputData(self.dataset) 1205 div.SetInputScalars(tp, array_name) 1206 div.ComputeDivergenceOn() 1207 div.ComputeGradientOff() 1208 div.ComputeVorticityOff() 1209 div.SetDivergenceArrayName("Divergence") 1210 div.SetFasterApproximation(fast) 1211 div.Update() 1212 if on.startswith("p"): 1213 dvecs = utils.vtk2numpy(div.GetOutput().GetPointData().GetArray("Divergence")) 1214 else: 1215 dvecs = utils.vtk2numpy(div.GetOutput().GetCellData().GetArray("Divergence")) 1216 return dvecs 1217 1218 def vorticity(self, array_name=None, on="points", fast=False) -> np.ndarray: 1219 """ 1220 Compute and return the vorticity of a vector field as a numpy array. 1221 1222 Arguments: 1223 array_name : (str) 1224 name of the array to compute the vorticity, 1225 if None the current active array is selected 1226 on : (str) 1227 compute either on 'points' or 'cells' data 1228 fast : (bool) 1229 if True, will use a less accurate algorithm 1230 that performs fewer derivative calculations (and is therefore faster). 1231 """ 1232 vort = vtki.new("GradientFilter") 1233 if on.startswith("p"): 1234 varr = self.dataset.GetPointData() 1235 tp = vtki.vtkDataObject.FIELD_ASSOCIATION_POINTS 1236 elif on.startswith("c"): 1237 varr = self.dataset.GetCellData() 1238 tp = vtki.vtkDataObject.FIELD_ASSOCIATION_CELLS 1239 else: 1240 vedo.logger.error(f"in vorticity(): unknown option {on}") 1241 raise RuntimeError 1242 1243 if array_name is None: 1244 if varr.GetVectors(): 1245 array_name = varr.GetVectors().GetName() 1246 else: 1247 vedo.logger.error(f"in vorticity(): no vectors found for {on}") 1248 raise RuntimeError 1249 1250 vort.SetInputData(self.dataset) 1251 vort.SetInputScalars(tp, array_name) 1252 vort.ComputeDivergenceOff() 1253 vort.ComputeGradientOff() 1254 vort.ComputeVorticityOn() 1255 vort.SetVorticityArrayName("Vorticity") 1256 vort.SetFasterApproximation(fast) 1257 vort.Update() 1258 if on.startswith("p"): 1259 vvecs = utils.vtk2numpy(vort.GetOutput().GetPointData().GetArray("Vorticity")) 1260 else: 1261 vvecs = utils.vtk2numpy(vort.GetOutput().GetCellData().GetArray("Vorticity")) 1262 return vvecs 1263 1264 def probe( 1265 self, 1266 source, 1267 categorical=False, 1268 snap=False, 1269 tol=0, 1270 ) -> Self: 1271 """ 1272 Takes a data set and probes its scalars at the specified points in space. 1273 1274 Note that a mask is also output with valid/invalid points which can be accessed 1275 with `mesh.pointdata['ValidPointMask']`. 1276 1277 Arguments: 1278 source : any dataset 1279 the data set to probe. 1280 categorical : bool 1281 control whether the source pointdata is to be treated as categorical. 1282 snap : bool 1283 snap to the cell with the closest point if no cell was found 1284 tol : float 1285 the tolerance to use when performing the probe. 1286 1287 Check out also: 1288 `interpolate_data_from()` and `tovolume()` 1289 1290 Examples: 1291 - [probe_points.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/probe_points.py) 1292 1293 ![](https://vedo.embl.es/images/volumetric/probePoints.png) 1294 """ 1295 probe_filter = vtki.new("ProbeFilter") 1296 probe_filter.SetSourceData(source.dataset) 1297 probe_filter.SetInputData(self.dataset) 1298 probe_filter.PassCellArraysOn() 1299 probe_filter.PassFieldArraysOn() 1300 probe_filter.PassPointArraysOn() 1301 probe_filter.SetCategoricalData(categorical) 1302 probe_filter.ComputeToleranceOff() 1303 if tol: 1304 probe_filter.ComputeToleranceOn() 1305 probe_filter.SetTolerance(tol) 1306 probe_filter.SetSnapToCellWithClosestPoint(snap) 1307 probe_filter.Update() 1308 self._update(probe_filter.GetOutput(), reset_locators=False) 1309 self.pipeline = utils.OperationNode("probe", parents=[self, source]) 1310 self.pointdata.rename("vtkValidPointMask", "ValidPointMask") 1311 return self 1312 1313 def compute_cell_size(self) -> Self: 1314 """ 1315 Add to this object a cell data array 1316 containing the area, volume and edge length 1317 of the cells (when applicable to the object type). 1318 1319 Array names are: `Area`, `Volume`, `Length`. 1320 """ 1321 csf = vtki.new("CellSizeFilter") 1322 csf.SetInputData(self.dataset) 1323 csf.SetComputeArea(1) 1324 csf.SetComputeVolume(1) 1325 csf.SetComputeLength(1) 1326 csf.SetComputeVertexCount(0) 1327 csf.SetAreaArrayName("Area") 1328 csf.SetVolumeArrayName("Volume") 1329 csf.SetLengthArrayName("Length") 1330 csf.Update() 1331 self._update(csf.GetOutput(), reset_locators=False) 1332 return self 1333 1334 def generate_random_data(self) -> Self: 1335 """Fill a dataset with random attributes""" 1336 gen = vtki.new("RandomAttributeGenerator") 1337 gen.SetInputData(self.dataset) 1338 gen.GenerateAllDataOn() 1339 gen.SetDataTypeToFloat() 1340 gen.GeneratePointNormalsOff() 1341 gen.GeneratePointTensorsOn() 1342 gen.GenerateCellScalarsOn() 1343 gen.Update() 1344 self._update(gen.GetOutput(), reset_locators=False) 1345 self.pipeline = utils.OperationNode("generate_random_data", parents=[self]) 1346 return self 1347 1348 def integrate_data(self) -> dict: 1349 """ 1350 Integrate point and cell data arrays while computing length, 1351 area or volume of the domain. It works for 1D, 2D or 3D cells. 1352 1353 For volumetric datasets, this filter ignores all but 3D cells. 1354 It will not compute the volume contained in a closed surface. 1355 1356 Returns a dictionary with keys: `pointdata`, `celldata`, `metadata`, 1357 which contain the integration result for the corresponding attributes. 1358 1359 Examples: 1360 ```python 1361 from vedo import * 1362 surf = Sphere(res=100) 1363 surf.pointdata['scalars'] = np.ones(surf.npoints) 1364 data = surf.integrate_data() 1365 print(data['pointdata']['scalars'], "is equal to 4pi", 4*np.pi) 1366 ``` 1367 1368 ```python 1369 from vedo import * 1370 1371 xcoords1 = np.arange(0, 2.2, 0.2) 1372 xcoords2 = sqrt(np.arange(0, 4.2, 0.2)) 1373 1374 ycoords = np.arange(0, 1.2, 0.2) 1375 1376 surf1 = Grid(s=(xcoords1, ycoords)).rotate_y(-45).lw(2) 1377 surf2 = Grid(s=(xcoords2, ycoords)).rotate_y(-45).lw(2) 1378 1379 surf1.pointdata['scalars'] = surf1.vertices[:,2] 1380 surf2.pointdata['scalars'] = surf2.vertices[:,2] 1381 1382 data1 = surf1.integrate_data() 1383 data2 = surf2.integrate_data() 1384 1385 print(data1['pointdata']['scalars'], 1386 "is equal to", 1387 data2['pointdata']['scalars'], 1388 "even if the grids are different!", 1389 "(= the volume under the surface)" 1390 ) 1391 show(surf1, surf2, N=2, axes=1).close() 1392 ``` 1393 """ 1394 vinteg = vtki.new("IntegrateAttributes") 1395 vinteg.SetInputData(self.dataset) 1396 vinteg.Update() 1397 ugrid = vedo.UnstructuredGrid(vinteg.GetOutput()) 1398 data = dict( 1399 pointdata=ugrid.pointdata.todict(), 1400 celldata=ugrid.celldata.todict(), 1401 metadata=ugrid.metadata.todict(), 1402 ) 1403 return data 1404 1405 def write(self, filename, binary=True) -> None: 1406 """Write object to file.""" 1407 out = vedo.file_io.write(self, filename, binary) 1408 out.pipeline = utils.OperationNode( 1409 "write", parents=[self], comment=filename[:15], shape="folder", c="#8a817c" 1410 ) 1411 1412 def tomesh(self, bounds=(), shrink=0) -> "vedo.Mesh": 1413 """ 1414 Extract boundary geometry from dataset (or convert data to polygonal type). 1415 1416 Two new arrays are added to the mesh: `OriginalCellIds` and `OriginalPointIds` 1417 to keep track of the original mesh elements. 1418 1419 Arguments: 1420 bounds : (list) 1421 specify a sub-region to extract 1422 shrink : (float) 1423 shrink the cells to a fraction of their original size 1424 """ 1425 geo = vtki.new("GeometryFilter") 1426 1427 if shrink: 1428 sf = vtki.new("ShrinkFilter") 1429 sf.SetInputData(self.dataset) 1430 sf.SetShrinkFactor(shrink) 1431 sf.Update() 1432 geo.SetInputData(sf.GetOutput()) 1433 else: 1434 geo.SetInputData(self.dataset) 1435 1436 geo.SetPassThroughCellIds(1) 1437 geo.SetPassThroughPointIds(1) 1438 geo.SetOriginalCellIdsName("OriginalCellIds") 1439 geo.SetOriginalPointIdsName("OriginalPointIds") 1440 geo.SetNonlinearSubdivisionLevel(1) 1441 # geo.MergingOff() # crashes on StructuredGrids 1442 if bounds: 1443 geo.SetExtent(bounds) 1444 geo.ExtentClippingOn() 1445 geo.Update() 1446 msh = vedo.mesh.Mesh(geo.GetOutput()) 1447 msh.pipeline = utils.OperationNode("tomesh", parents=[self], c="#9e2a2b") 1448 return msh 1449 1450 def signed_distance(self, dims=(20, 20, 20), bounds=None, invert=False, max_radius=None) -> "vedo.Volume": 1451 """ 1452 Compute the `Volume` object whose voxels contains the signed distance from 1453 the object. The calling object must have "Normals" defined. 1454 1455 Arguments: 1456 bounds : (list, actor) 1457 bounding box sizes 1458 dims : (list) 1459 dimensions (nr. of voxels) of the output volume. 1460 invert : (bool) 1461 flip the sign 1462 max_radius : (float) 1463 specify how far out to propagate distance calculation 1464 1465 Examples: 1466 - [distance2mesh.py](https://github.com/marcomusy/vedo/blob/master/examples/basic/distance2mesh.py) 1467 1468 ![](https://vedo.embl.es/images/basic/distance2mesh.png) 1469 """ 1470 if bounds is None: 1471 bounds = self.bounds() 1472 if max_radius is None: 1473 max_radius = self.diagonal_size() / 2 1474 dist = vtki.new("SignedDistance") 1475 dist.SetInputData(self.dataset) 1476 dist.SetRadius(max_radius) 1477 dist.SetBounds(bounds) 1478 dist.SetDimensions(dims) 1479 dist.Update() 1480 img = dist.GetOutput() 1481 if invert: 1482 mat = vtki.new("ImageMathematics") 1483 mat.SetInput1Data(img) 1484 mat.SetOperationToMultiplyByK() 1485 mat.SetConstantK(-1) 1486 mat.Update() 1487 img = mat.GetOutput() 1488 1489 vol = vedo.Volume(img) 1490 vol.name = "SignedDistanceVolume" 1491 vol.pipeline = utils.OperationNode( 1492 "signed_distance", 1493 parents=[self], 1494 comment=f"dims={tuple(vol.dimensions())}", 1495 c="#e9c46a:#0096c7", 1496 ) 1497 return vol 1498 1499 def unsigned_distance( 1500 self, dims=(25,25,25), bounds=(), max_radius=0, cap_value=0) -> "vedo.Volume": 1501 """ 1502 Compute the `Volume` object whose voxels contains the unsigned distance. 1503 """ 1504 dist = vtki.new("UnsignedDistance") 1505 dist.SetInputData(self.dataset) 1506 dist.SetDimensions(dims) 1507 1508 if len(bounds) == 6: 1509 dist.SetBounds(bounds) 1510 # elif bounds == "auto": 1511 # dist.AdjustBoundsOn() 1512 else: 1513 dist.SetBounds(self.bounds()) 1514 if not max_radius: 1515 max_radius = self.diagonal_size() / 10 1516 dist.SetRadius(max_radius) 1517 1518 if self.point_locator: 1519 dist.SetLocator(self.point_locator) 1520 1521 if cap_value is not None: 1522 dist.CappingOn() 1523 dist.SetCapValue(cap_value) 1524 dist.SetOutputScalarTypeToFloat() 1525 dist.Update() 1526 vol = vedo.Volume(dist.GetOutput()) 1527 vol.name = "UnsignedDistanceVolume" 1528 vol.pipeline = utils.OperationNode( 1529 "unsigned_distance", parents=[self], c="#e9c46a:#0096c7") 1530 return vol 1531 1532 def smooth_data(self, 1533 niter=10, relaxation_factor=0.1, strategy=0, mask=None, 1534 exclude=("Normals", "TextureCoordinates"), 1535 ) -> Self: 1536 """ 1537 Smooth point attribute data using distance weighted Laplacian kernel. 1538 1539 The effect is to blur regions of high variation and emphasize low variation regions. 1540 1541 Arguments: 1542 niter : (int) 1543 number of iterations 1544 relaxation_factor : (float) 1545 relaxation factor controlling the amount of Laplacian smoothing applied 1546 strategy : (int) 1547 strategy to use for Laplacian smoothing 1548 - 0: use all points, all point data attributes are smoothed 1549 - 1: smooth all point attribute data except those on the boundary 1550 - 2: only point data connected to a boundary point are smoothed 1551 mask : (str, np.ndarray) 1552 array to be used as a mask (ignore then the strategy keyword) 1553 exclude : (list) 1554 list of arrays to be excluded from smoothing 1555 """ 1556 try: 1557 saf = vtki.new("AttributeSmoothingFilter") 1558 except: 1559 vedo.logger.error("smooth_data() only avaialble in VTK>=9.3.0") 1560 return self 1561 saf.SetInputData(self.dataset) 1562 saf.SetRelaxationFactor(relaxation_factor) 1563 saf.SetNumberOfIterations(niter) 1564 1565 for ex in exclude: 1566 saf.AddExcludedArray(ex) 1567 1568 saf.SetWeightsTypeToDistance2() 1569 1570 saf.SetSmoothingStrategy(strategy) 1571 if mask is not None: 1572 saf.SetSmoothingStrategyToSmoothingMask() 1573 if isinstance(mask, str): 1574 mask_ = self.dataset.GetPointData().GetArray(mask) 1575 if not mask_: 1576 vedo.logger.error(f"smooth_data(): mask array {mask} not found") 1577 return self 1578 mask_array = vtki.vtkUnsignedCharArray() 1579 mask_array.ShallowCopy(mask_) 1580 mask_array.SetName(mask_.GetName()) 1581 else: 1582 mask_array = utils.numpy2vtk(mask, dtype=np.uint8) 1583 saf.SetSmoothingMask(mask_array) 1584 1585 saf.Update() 1586 1587 self._update(saf.GetOutput()) 1588 self.pipeline = utils.OperationNode( 1589 "smooth_data", comment=f"strategy {strategy}", parents=[self], c="#9e2a2b" 1590 ) 1591 return self 1592 1593 def compute_streamlines( 1594 self, 1595 seeds: Any, 1596 integrator="rk4", 1597 direction="forward", 1598 initial_step_size=None, 1599 max_propagation=None, 1600 max_steps=10000, 1601 step_length=0, 1602 surface_constrained=False, 1603 compute_vorticity=False, 1604 ) -> Union["vedo.Lines", None]: 1605 """ 1606 Integrate a vector field to generate streamlines. 1607 1608 Arguments: 1609 seeds : (Mesh, Points, list) 1610 starting points of the streamlines 1611 integrator : (str) 1612 type of integration method to be used: 1613 - "rk2" (Runge-Kutta 2) 1614 - "rk4" (Runge-Kutta 4) 1615 - "rk45" (Runge-Kutta 45) 1616 direction : (str) 1617 direction of integration, either "forward", "backward" or "both" 1618 initial_step_size : (float) 1619 initial step size used for line integration 1620 max_propagation : (float) 1621 maximum length of a streamline expressed in absolute units 1622 max_steps : (int) 1623 maximum number of steps for a streamline 1624 step_length : (float) 1625 maximum length of a step expressed in absolute units 1626 surface_constrained : (bool) 1627 whether to stop integrating when the streamline leaves the surface 1628 compute_vorticity : (bool) 1629 whether to compute the vorticity at each streamline point 1630 """ 1631 b = self.dataset.GetBounds() 1632 size = (b[5]-b[4] + b[3]-b[2] + b[1]-b[0]) / 3 1633 if initial_step_size is None: 1634 initial_step_size = size / 1000.0 1635 1636 if max_propagation is None: 1637 max_propagation = size * 2 1638 1639 if utils.is_sequence(seeds): 1640 seeds = vedo.Points(seeds) 1641 1642 sti = vtki.new("StreamTracer") 1643 sti.SetSourceData(seeds.dataset) 1644 if isinstance(self, vedo.RectilinearGrid): 1645 sti.SetInputData(vedo.UnstructuredGrid(self.dataset).dataset) 1646 else: 1647 sti.SetInputDataObject(self.dataset) 1648 1649 sti.SetInitialIntegrationStep(initial_step_size) 1650 sti.SetComputeVorticity(compute_vorticity) 1651 sti.SetMaximumNumberOfSteps(max_steps) 1652 sti.SetMaximumPropagation(max_propagation) 1653 sti.SetSurfaceStreamlines(surface_constrained) 1654 if step_length: 1655 sti.SetMaximumIntegrationStep(step_length) 1656 1657 if "for" in direction: 1658 sti.SetIntegrationDirectionToForward() 1659 elif "back" in direction: 1660 sti.SetIntegrationDirectionToBackward() 1661 elif "both" in direction: 1662 sti.SetIntegrationDirectionToBoth() 1663 else: 1664 vedo.logger.error(f"in compute_streamlines(), unknown direction {direction}") 1665 return None 1666 1667 if integrator == "rk2": 1668 sti.SetIntegratorTypeToRungeKutta2() 1669 elif integrator == "rk4": 1670 sti.SetIntegratorTypeToRungeKutta4() 1671 elif integrator == "rk45": 1672 sti.SetIntegratorTypeToRungeKutta45() 1673 else: 1674 vedo.logger.error(f"in compute_streamlines(), unknown integrator {integrator}") 1675 return None 1676 1677 sti.Update() 1678 1679 stlines = vedo.shapes.Lines(sti.GetOutput(), lw=4) 1680 stlines.name = "StreamLines" 1681 self.pipeline = utils.OperationNode( 1682 "compute_streamlines", comment=f"{integrator}", parents=[self, seeds], c="#9e2a2b" 1683 ) 1684 return stlines 1685 1686############################################################################### 1687class PointAlgorithms(CommonAlgorithms): 1688 """Methods for point clouds.""" 1689 1690 def apply_transform(self, LT: Any, deep_copy=True) -> Self: 1691 """ 1692 Apply a linear or non-linear transformation to the mesh polygonal data. 1693 1694 Example: 1695 ```python 1696 from vedo import Cube, show, settings 1697 settings.use_parallel_projection = True 1698 c1 = Cube().rotate_z(25).pos(2,1).mirror().alpha(0.5) 1699 T = c1.transform # rotate by 5 degrees, place at (2,1) 1700 c2 = Cube().c('red4').wireframe().lw(10).lighting('off') 1701 c2.apply_transform(T) 1702 show(c1, c2, "The 2 cubes should overlap!", axes=1).close() 1703 ``` 1704 1705 ![](https://vedo.embl.es/images/feats/apply_transform.png) 1706 """ 1707 if self.dataset.GetNumberOfPoints() == 0: 1708 return self 1709 1710 if isinstance(LT, LinearTransform): 1711 LT_is_linear = True 1712 tr = LT.T 1713 if LT.is_identity(): 1714 return self 1715 1716 elif isinstance(LT, (vtki.vtkMatrix4x4, vtki.vtkLinearTransform)) or utils.is_sequence(LT): 1717 LT_is_linear = True 1718 LT = LinearTransform(LT) 1719 tr = LT.T 1720 if LT.is_identity(): 1721 return self 1722 1723 elif isinstance(LT, NonLinearTransform): 1724 LT_is_linear = False 1725 tr = LT.T 1726 self.transform = LT # reset 1727 1728 elif isinstance(LT, vtki.vtkThinPlateSplineTransform): 1729 LT_is_linear = False 1730 tr = LT 1731 self.transform = NonLinearTransform(LT) # reset 1732 1733 else: 1734 vedo.logger.error(f"apply_transform(), unknown input type:\n{LT}") 1735 return self 1736 1737 ################ 1738 if LT_is_linear: 1739 try: 1740 # self.transform might still not be linear 1741 self.transform.concatenate(LT) 1742 except AttributeError: 1743 # in that case reset it 1744 self.transform = LinearTransform() 1745 1746 ################ 1747 if isinstance(self.dataset, vtki.vtkPolyData): 1748 tp = vtki.new("TransformPolyDataFilter") 1749 elif isinstance(self.dataset, vtki.vtkUnstructuredGrid): 1750 tp = vtki.new("TransformFilter") 1751 tp.TransformAllInputVectorsOn() 1752 # elif isinstance(self.dataset, vtki.vtkImageData): 1753 # tp = vtki.new("ImageReslice") 1754 # tp.SetInterpolationModeToCubic() 1755 # tp.SetResliceTransform(tr) 1756 else: 1757 vedo.logger.error(f"apply_transform(), unknown input type: {[self.dataset]}") 1758 return self 1759 1760 tp.SetTransform(tr) 1761 tp.SetInputData(self.dataset) 1762 tp.Update() 1763 out = tp.GetOutput() 1764 1765 if deep_copy: 1766 self.dataset.DeepCopy(out) 1767 else: 1768 self.dataset.ShallowCopy(out) 1769 1770 # reset the locators 1771 self.point_locator = None 1772 self.cell_locator = None 1773 self.line_locator = None 1774 return self 1775 1776 def apply_transform_from_actor(self) -> LinearTransform: 1777 """ 1778 Apply the current transformation of the actor to the data. 1779 Useful when manually moving an actor (eg. when pressing "a"). 1780 Returns the `LinearTransform` object. 1781 1782 Note that this method is automatically called when the window is closed, 1783 or the interactor style is changed. 1784 """ 1785 M = self.actor.GetMatrix() 1786 self.apply_transform(M) 1787 iden = vtki.vtkMatrix4x4() 1788 self.actor.PokeMatrix(iden) 1789 return LinearTransform(M) 1790 1791 def pos(self, x=None, y=None, z=None) -> Self: 1792 """Set/Get object position.""" 1793 if x is None: # get functionality 1794 return self.transform.position 1795 1796 if z is None and y is None: # assume x is of the form (x,y,z) 1797 if len(x) == 3: 1798 x, y, z = x 1799 else: 1800 x, y = x 1801 z = 0 1802 elif z is None: # assume x,y is of the form x, y 1803 z = 0 1804 1805 q = self.transform.position 1806 delta = [x, y, z] - q 1807 if delta[0] == delta[1] == delta[2] == 0: 1808 return self 1809 LT = LinearTransform().translate(delta) 1810 return self.apply_transform(LT) 1811 1812 def shift(self, dx=0, dy=0, dz=0) -> Self: 1813 """Add a vector to the current object position.""" 1814 if utils.is_sequence(dx): 1815 dx, dy, dz = utils.make3d(dx) 1816 if dx == dy == dz == 0: 1817 return self 1818 LT = LinearTransform().translate([dx, dy, dz]) 1819 return self.apply_transform(LT) 1820 1821 def x(self, val=None) -> Self: 1822 """Set/Get object position along x axis.""" 1823 p = self.transform.position 1824 if val is None: 1825 return p[0] 1826 self.pos(val, p[1], p[2]) 1827 return self 1828 1829 def y(self, val=None)-> Self: 1830 """Set/Get object position along y axis.""" 1831 p = self.transform.position 1832 if val is None: 1833 return p[1] 1834 self.pos(p[0], val, p[2]) 1835 return self 1836 1837 def z(self, val=None) -> Self: 1838 """Set/Get object position along z axis.""" 1839 p = self.transform.position 1840 if val is None: 1841 return p[2] 1842 self.pos(p[0], p[1], val) 1843 return self 1844 1845 def rotate(self, angle: float, axis=(1, 0, 0), point=(0, 0, 0), rad=False) -> Self: 1846 """ 1847 Rotate around an arbitrary `axis` passing through `point`. 1848 1849 Example: 1850 ```python 1851 from vedo import * 1852 c1 = Cube() 1853 c2 = c1.clone().c('violet').alpha(0.5) # copy of c1 1854 v = vector(0.2,1,0) 1855 p = vector(1,0,0) # axis passes through this point 1856 c2.rotate(90, axis=v, point=p) 1857 l = Line(-v+p, v+p).lw(3).c('red') 1858 show(c1, l, c2, axes=1).close() 1859 ``` 1860 1861 ![](https://vedo.embl.es/images/feats/rotate_axis.png) 1862 """ 1863 LT = LinearTransform() 1864 LT.rotate(angle, axis, point, rad) 1865 return self.apply_transform(LT) 1866 1867 def rotate_x(self, angle: float, rad=False, around=None) -> Self: 1868 """ 1869 Rotate around x-axis. If angle is in radians set `rad=True`. 1870 1871 Use `around` to define a pivoting point. 1872 """ 1873 if angle == 0: 1874 return self 1875 LT = LinearTransform().rotate_x(angle, rad, around) 1876 return self.apply_transform(LT) 1877 1878 def rotate_y(self, angle: float, rad=False, around=None) -> Self: 1879 """ 1880 Rotate around y-axis. If angle is in radians set `rad=True`. 1881 1882 Use `around` to define a pivoting point. 1883 """ 1884 if angle == 0: 1885 return self 1886 LT = LinearTransform().rotate_y(angle, rad, around) 1887 return self.apply_transform(LT) 1888 1889 def rotate_z(self, angle: float, rad=False, around=None) -> Self: 1890 """ 1891 Rotate around z-axis. If angle is in radians set `rad=True`. 1892 1893 Use `around` to define a pivoting point. 1894 """ 1895 if angle == 0: 1896 return self 1897 LT = LinearTransform().rotate_z(angle, rad, around) 1898 return self.apply_transform(LT) 1899 1900 def reorient(self, initaxis, newaxis, rotation=0, rad=False, xyplane=False) -> Self: 1901 """ 1902 Reorient the object to point to a new direction from an initial one. 1903 If `initaxis` is None, the object will be assumed in its "default" orientation. 1904 If `xyplane` is True, the object will be rotated to lie on the xy plane. 1905 1906 Use `rotation` to first rotate the object around its `initaxis`. 1907 """ 1908 q = self.transform.position 1909 LT = LinearTransform() 1910 LT.reorient(initaxis, newaxis, q, rotation, rad, xyplane) 1911 return self.apply_transform(LT) 1912 1913 def scale(self, s=None, reset=False, origin=True) -> Union[Self, np.array]: 1914 """ 1915 Set/get object's scaling factor. 1916 1917 Arguments: 1918 s : (list, float) 1919 scaling factor(s). 1920 reset : (bool) 1921 if True previous scaling factors are ignored. 1922 origin : (bool) 1923 if True scaling is applied with respect to object's position, 1924 otherwise is applied respect to (0,0,0). 1925 1926 Note: 1927 use `s=(sx,sy,sz)` to scale differently in the three coordinates. 1928 """ 1929 if s is None: 1930 return np.array(self.transform.T.GetScale()) 1931 1932 if not utils.is_sequence(s): 1933 s = [s, s, s] 1934 1935 LT = LinearTransform() 1936 if reset: 1937 old_s = np.array(self.transform.T.GetScale()) 1938 LT.scale(s / old_s) 1939 else: 1940 if origin is True: 1941 LT.scale(s, origin=self.transform.position) 1942 elif origin is False: 1943 LT.scale(s, origin=False) 1944 else: 1945 LT.scale(s, origin=origin) 1946 1947 return self.apply_transform(LT) 1948 1949 1950############################################################################### 1951class VolumeAlgorithms(CommonAlgorithms): 1952 """Methods for Volume objects.""" 1953 1954 def bounds(self) -> np.ndarray: 1955 """ 1956 Get the object bounds. 1957 Returns a list in format `[xmin,xmax, ymin,ymax, zmin,zmax]`. 1958 """ 1959 # OVERRIDE CommonAlgorithms.bounds() which is too slow 1960 return np.array(self.dataset.GetBounds()) 1961 1962 def isosurface(self, value=None, flying_edges=False) -> "vedo.mesh.Mesh": 1963 """ 1964 Return an `Mesh` isosurface extracted from the `Volume` object. 1965 1966 Set `value` as single float or list of values to draw the isosurface(s). 1967 Use flying_edges for faster results (but sometimes can interfere with `smooth()`). 1968 1969 Examples: 1970 - [isosurfaces1.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/isosurfaces1.py) 1971 1972 ![](https://vedo.embl.es/images/volumetric/isosurfaces.png) 1973 """ 1974 scrange = self.dataset.GetScalarRange() 1975 1976 if flying_edges: 1977 cf = vtki.new("FlyingEdges3D") 1978 cf.InterpolateAttributesOn() 1979 else: 1980 cf = vtki.new("ContourFilter") 1981 cf.UseScalarTreeOn() 1982 1983 cf.SetInputData(self.dataset) 1984 cf.ComputeNormalsOn() 1985 1986 if utils.is_sequence(value): 1987 cf.SetNumberOfContours(len(value)) 1988 for i, t in enumerate(value): 1989 cf.SetValue(i, t) 1990 else: 1991 if value is None: 1992 value = (2 * scrange[0] + scrange[1]) / 3.0 1993 # print("automatic isosurface value =", value) 1994 cf.SetValue(0, value) 1995 1996 cf.Update() 1997 poly = cf.GetOutput() 1998 1999 out = vedo.mesh.Mesh(poly, c=None).phong() 2000 out.mapper.SetScalarRange(scrange[0], scrange[1]) 2001 2002 out.pipeline = utils.OperationNode( 2003 "isosurface", 2004 parents=[self], 2005 comment=f"#pts {out.dataset.GetNumberOfPoints()}", 2006 c="#4cc9f0:#e9c46a", 2007 ) 2008 return out 2009 2010 def isosurface_discrete(self, value=None, nsmooth=15) -> "vedo.mesh.Mesh": 2011 """ 2012 Create boundary/isocontour surfaces from a label map (e.g., a segmented image) using a threaded, 2013 3D version of the multiple objects/labels Surface Nets algorithm. 2014 The input is a 3D image (i.e., volume) where each voxel is labeled 2015 (integer labels are preferred to real values), and the output data is a polygonal mesh separating 2016 labeled regions / objects. 2017 (Note that on output each region [corresponding to a different segmented object] will share 2018 points/edges on a common boundary, i.e., two neighboring objects will share the boundary that separates them). 2019 2020 Arguments: 2021 value : (float, list) 2022 single value or list of values to draw the isosurface(s). 2023 nsmooth : (int) 2024 number of iterations of smoothing (0 means no smoothing). 2025 2026 Examples: 2027 - [isosurfaces2.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/isosurfaces2.py) 2028 """ 2029 if not utils.is_sequence(value): 2030 value = [value] 2031 2032 snets = vtki.new("SurfaceNets3D") 2033 snets.SetInputData(self.dataset) 2034 2035 if nsmooth: 2036 snets.SmoothingOn() 2037 snets.AutomaticSmoothingConstraintsOn() 2038 snets.GetSmoother().SetNumberOfIterations(nsmooth) 2039 # snets.GetSmoother().SetRelaxationFactor(relaxation_factor) 2040 # snets.GetSmoother().SetConstraintDistance(constraint_distance) 2041 else: 2042 snets.SmoothingOff() 2043 2044 for i, val in enumerate(value): 2045 snets.SetValue(i, val) 2046 snets.Update() 2047 snets.SetOutputMeshTypeToTriangles() 2048 snets.SetOutputStyleToBoundary() 2049 snets.Update() 2050 2051 out = vedo.mesh.Mesh(snets.GetOutput()) 2052 out.pipeline = utils.OperationNode( 2053 "isosurface_discrete", 2054 parents=[self], 2055 comment=f"#pts {out.dataset.GetNumberOfPoints()}", 2056 c="#4cc9f0:#e9c46a", 2057 ) 2058 return out 2059 2060 2061 def legosurface( 2062 self, 2063 vmin=None, 2064 vmax=None, 2065 invert=False, 2066 boundary=False, 2067 array_name="input_scalars", 2068 ) -> "vedo.mesh.Mesh": 2069 """ 2070 Represent an object - typically a `Volume` - as lego blocks (voxels). 2071 By default colors correspond to the volume's scalar. 2072 Returns an `Mesh` object. 2073 2074 Arguments: 2075 vmin : (float) 2076 the lower threshold, voxels below this value are not shown. 2077 vmax : (float) 2078 the upper threshold, voxels above this value are not shown. 2079 boundary : (bool) 2080 controls whether to include cells that are partially inside 2081 array_name : (int, str) 2082 name or index of the scalar array to be considered 2083 2084 Examples: 2085 - [legosurface.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/legosurface.py) 2086 2087 ![](https://vedo.embl.es/images/volumetric/56820682-da40e500-684c-11e9-8ea3-91cbcba24b3a.png) 2088 """ 2089 imp_dataset = vtki.new("ImplicitDataSet") 2090 imp_dataset.SetDataSet(self.dataset) 2091 window = vtki.new("ImplicitWindowFunction") 2092 window.SetImplicitFunction(imp_dataset) 2093 2094 srng = list(self.dataset.GetScalarRange()) 2095 if vmin is not None: 2096 srng[0] = vmin 2097 if vmax is not None: 2098 srng[1] = vmax 2099 tol = 0.00001 * (srng[1] - srng[0]) 2100 srng[0] -= tol 2101 srng[1] += tol 2102 window.SetWindowRange(srng) 2103 2104 extract = vtki.new("ExtractGeometry") 2105 extract.SetInputData(self.dataset) 2106 extract.SetImplicitFunction(window) 2107 extract.SetExtractInside(invert) 2108 extract.SetExtractBoundaryCells(boundary) 2109 extract.Update() 2110 2111 gf = vtki.new("GeometryFilter") 2112 gf.SetInputData(extract.GetOutput()) 2113 gf.Update() 2114 2115 m = vedo.mesh.Mesh(gf.GetOutput()).lw(0.1).flat() 2116 m.map_points_to_cells() 2117 m.celldata.select(array_name) 2118 2119 m.pipeline = utils.OperationNode( 2120 "legosurface", 2121 parents=[self], 2122 comment=f"array: {array_name}", 2123 c="#4cc9f0:#e9c46a", 2124 ) 2125 return m 2126 2127 def tomesh(self, fill=True, shrink=1.0) -> "vedo.mesh.Mesh": 2128 """ 2129 Build a polygonal Mesh from the current object. 2130 2131 If `fill=True`, the interior faces of all the cells are created. 2132 (setting a `shrink` value slightly smaller than the default 1.0 2133 can avoid flickering due to internal adjacent faces). 2134 2135 If `fill=False`, only the boundary faces will be generated. 2136 """ 2137 gf = vtki.new("GeometryFilter") 2138 if fill: 2139 sf = vtki.new("ShrinkFilter") 2140 sf.SetInputData(self.dataset) 2141 sf.SetShrinkFactor(shrink) 2142 sf.Update() 2143 gf.SetInputData(sf.GetOutput()) 2144 gf.Update() 2145 poly = gf.GetOutput() 2146 if shrink == 1.0: 2147 clean_poly = vtki.new("CleanPolyData") 2148 clean_poly.PointMergingOn() 2149 clean_poly.ConvertLinesToPointsOn() 2150 clean_poly.ConvertPolysToLinesOn() 2151 clean_poly.ConvertStripsToPolysOn() 2152 clean_poly.SetInputData(poly) 2153 clean_poly.Update() 2154 poly = clean_poly.GetOutput() 2155 else: 2156 gf.SetInputData(self.dataset) 2157 gf.Update() 2158 poly = gf.GetOutput() 2159 2160 msh = vedo.mesh.Mesh(poly).flat() 2161 msh.scalarbar = self.scalarbar 2162 lut = utils.ctf2lut(self) 2163 if lut: 2164 msh.mapper.SetLookupTable(lut) 2165 2166 msh.pipeline = utils.OperationNode( 2167 "tomesh", parents=[self], comment=f"fill={fill}", c="#9e2a2b:#e9c46a" 2168 ) 2169 return msh
45class DataArrayHelper: 46 # Internal use only. 47 # Helper class to manage data associated to either 48 # points (or vertices) and cells (or faces). 49 def __init__(self, obj, association): 50 51 self.obj = obj 52 self.association = association 53 54 def __getitem__(self, key): 55 56 if self.association == 0: 57 data = self.obj.dataset.GetPointData() 58 59 elif self.association == 1: 60 data = self.obj.dataset.GetCellData() 61 62 elif self.association == 2: 63 data = self.obj.dataset.GetFieldData() 64 65 varr = data.GetAbstractArray(key) 66 if isinstance(varr, vtki.vtkStringArray): 67 if isinstance(key, int): 68 key = data.GetArrayName(key) 69 n = varr.GetNumberOfValues() 70 narr = [varr.GetValue(i) for i in range(n)] 71 return narr 72 ########### 73 74 else: 75 raise RuntimeError() 76 77 if isinstance(key, int): 78 key = data.GetArrayName(key) 79 80 arr = data.GetArray(key) 81 if not arr: 82 return None 83 return utils.vtk2numpy(arr) 84 85 def __setitem__(self, key, input_array): 86 87 if self.association == 0: 88 data = self.obj.dataset.GetPointData() 89 n = self.obj.dataset.GetNumberOfPoints() 90 self.obj.mapper.SetScalarModeToUsePointData() 91 92 elif self.association == 1: 93 data = self.obj.dataset.GetCellData() 94 n = self.obj.dataset.GetNumberOfCells() 95 self.obj.mapper.SetScalarModeToUseCellData() 96 97 elif self.association == 2: 98 data = self.obj.dataset.GetFieldData() 99 if not utils.is_sequence(input_array): 100 input_array = [input_array] 101 102 if isinstance(input_array[0], str): 103 varr = vtki.vtkStringArray() 104 varr.SetName(key) 105 varr.SetNumberOfComponents(1) 106 varr.SetNumberOfTuples(len(input_array)) 107 for i, iarr in enumerate(input_array): 108 if isinstance(iarr, np.ndarray): 109 iarr = iarr.tolist() # better format 110 # Note: a string k can be converted to numpy with 111 # import json; k = np.array(json.loads(k)) 112 varr.InsertValue(i, str(iarr)) 113 else: 114 try: 115 varr = utils.numpy2vtk(input_array, name=key) 116 except TypeError as e: 117 vedo.logger.error( 118 f"cannot create metadata with input object:\n" 119 f"{input_array}" 120 f"\n\nAllowed content examples are:\n" 121 f"- flat list of strings ['a','b', 1, [1,2,3], ...]" 122 f" (first item must be a string in this case)\n" 123 f" hint: use k = np.array(json.loads(k)) to convert strings\n" 124 f"- numpy arrays of any shape" 125 ) 126 raise e 127 128 data.AddArray(varr) 129 return ############ 130 131 else: 132 raise RuntimeError() 133 134 if len(input_array) != n: 135 vedo.logger.error( 136 f"Error in point/cell data: length of input {len(input_array)}" 137 f" != {n} nr. of elements" 138 ) 139 raise RuntimeError() 140 141 input_array = np.asarray(input_array) 142 varr = utils.numpy2vtk(input_array, name=key) 143 data.AddArray(varr) 144 145 if len(input_array.shape) == 1: # scalars 146 data.SetActiveScalars(key) 147 try: # could be a volume mapper 148 self.obj.mapper.SetScalarRange(data.GetScalars().GetRange()) 149 except AttributeError: 150 pass 151 elif len(input_array.shape) == 2 and input_array.shape[1] == 3: # vectors 152 if key.lower() == "normals": 153 data.SetActiveNormals(key) 154 else: 155 data.SetActiveVectors(key) 156 157 def keys(self) -> List[str]: 158 """Return the list of available data array names""" 159 if self.association == 0: 160 data = self.obj.dataset.GetPointData() 161 elif self.association == 1: 162 data = self.obj.dataset.GetCellData() 163 elif self.association == 2: 164 data = self.obj.dataset.GetFieldData() 165 arrnames = [] 166 for i in range(data.GetNumberOfArrays()): 167 name = "" 168 if self.association == 2: 169 name = data.GetAbstractArray(i).GetName() 170 else: 171 iarr = data.GetArray(i) 172 if iarr: 173 name = iarr.GetName() 174 if name: 175 arrnames.append(name) 176 return arrnames 177 178 def items(self) -> List: 179 """Return the list of available data array `(names, values)`.""" 180 if self.association == 0: 181 data = self.obj.dataset.GetPointData() 182 elif self.association == 1: 183 data = self.obj.dataset.GetCellData() 184 elif self.association == 2: 185 data = self.obj.dataset.GetFieldData() 186 arrnames = [] 187 for i in range(data.GetNumberOfArrays()): 188 if self.association == 2: 189 name = data.GetAbstractArray(i).GetName() 190 else: 191 name = data.GetArray(i).GetName() 192 if name: 193 arrnames.append((name, self[name])) 194 return arrnames 195 196 def todict(self) -> dict: 197 """Return a dictionary of the available data arrays.""" 198 return dict(self.items()) 199 200 def rename(self, oldname: str, newname: str) -> None: 201 """Rename an array""" 202 if self.association == 0: 203 varr = self.obj.dataset.GetPointData().GetArray(oldname) 204 elif self.association == 1: 205 varr = self.obj.dataset.GetCellData().GetArray(oldname) 206 elif self.association == 2: 207 varr = self.obj.dataset.GetFieldData().GetAbstractArray(oldname) 208 if varr: 209 varr.SetName(newname) 210 else: 211 vedo.logger.warning( 212 f"Cannot rename non existing array {oldname} to {newname}" 213 ) 214 215 def remove(self, key: Union[int, str]) -> None: 216 """Remove a data array by name or number""" 217 if self.association == 0: 218 self.obj.dataset.GetPointData().RemoveArray(key) 219 elif self.association == 1: 220 self.obj.dataset.GetCellData().RemoveArray(key) 221 elif self.association == 2: 222 self.obj.dataset.GetFieldData().RemoveArray(key) 223 224 def clear(self) -> None: 225 """Remove all data associated to this object""" 226 if self.association == 0: 227 data = self.obj.dataset.GetPointData() 228 elif self.association == 1: 229 data = self.obj.dataset.GetCellData() 230 elif self.association == 2: 231 data = self.obj.dataset.GetFieldData() 232 for i in range(data.GetNumberOfArrays()): 233 if self.association == 2: 234 name = data.GetAbstractArray(i).GetName() 235 else: 236 name = data.GetArray(i).GetName() 237 data.RemoveArray(name) 238 239 def select(self, key: Union[int, str]) -> Any: 240 """Select one specific array by its name to make it the `active` one.""" 241 # Default (ColorModeToDefault): unsigned char scalars are treated as colors, 242 # and NOT mapped through the lookup table, while everything else is. 243 # ColorModeToDirectScalar extends ColorModeToDefault such that all integer 244 # types are treated as colors with values in the range 0-255 245 # and floating types are treated as colors with values in the range 0.0-1.0. 246 # Setting ColorModeToMapScalars means that all scalar data will be mapped 247 # through the lookup table. 248 # (Note that for multi-component scalars, the particular component 249 # to use for mapping can be specified using the SelectColorArray() method.) 250 if self.association == 0: 251 data = self.obj.dataset.GetPointData() 252 self.obj.mapper.SetScalarModeToUsePointData() 253 else: 254 data = self.obj.dataset.GetCellData() 255 self.obj.mapper.SetScalarModeToUseCellData() 256 257 if isinstance(key, int): 258 key = data.GetArrayName(key) 259 260 arr = data.GetArray(key) 261 if not arr: 262 return self.obj 263 264 nc = arr.GetNumberOfComponents() 265 # print("GetNumberOfComponents", nc) 266 if nc == 1: 267 data.SetActiveScalars(key) 268 elif nc == 2: 269 data.SetTCoords(arr) 270 elif nc in (3, 4): 271 if "rgb" in key.lower(): # type: ignore 272 data.SetActiveScalars(key) 273 try: 274 # could be a volume mapper 275 self.obj.mapper.SetColorModeToDirectScalars() 276 data.SetActiveVectors(None) # need this to fix bug in #1066 277 # print("SetColorModeToDirectScalars for", key) 278 except AttributeError: 279 pass 280 else: 281 data.SetActiveVectors(key) 282 elif nc == 9: 283 data.SetActiveTensors(key) 284 else: 285 vedo.logger.error(f"Cannot select array {key} with {nc} components") 286 return self.obj 287 288 try: 289 # could be a volume mapper 290 self.obj.mapper.SetArrayName(key) 291 self.obj.mapper.ScalarVisibilityOn() 292 except AttributeError: 293 pass 294 295 return self.obj 296 297 def select_texture_coords(self, key: Union[int,str]) -> Any: 298 """Select one specific array to be used as texture coordinates.""" 299 if self.association == 0: 300 data = self.obj.dataset.GetPointData() 301 else: 302 vedo.logger.warning("texture coordinates are only available for point data") 303 return 304 305 if isinstance(key, int): 306 key = data.GetArrayName(key) 307 data.SetTCoords(data.GetArray(key)) 308 return self.obj 309 310 def select_normals(self, key: Union[int,str]) -> Any: 311 """Select one specific normal array by its name to make it the "active" one.""" 312 if self.association == 0: 313 data = self.obj.dataset.GetPointData() 314 self.obj.mapper.SetScalarModeToUsePointData() 315 else: 316 data = self.obj.dataset.GetCellData() 317 self.obj.mapper.SetScalarModeToUseCellData() 318 319 if isinstance(key, int): 320 key = data.GetArrayName(key) 321 data.SetActiveNormals(key) 322 return self.obj 323 324 def print(self, **kwargs) -> None: 325 """Print the array names available to terminal""" 326 colors.printc(self.keys(), **kwargs) 327 328 def __repr__(self) -> str: 329 """Representation""" 330 331 def _get_str(pd, header): 332 out = f"\x1b[2m\x1b[1m\x1b[7m{header}" 333 if pd.GetNumberOfArrays(): 334 if self.obj.name: 335 out += f" in {self.obj.name}" 336 out += f" contains {pd.GetNumberOfArrays()} array(s)\x1b[0m" 337 for i in range(pd.GetNumberOfArrays()): 338 varr = pd.GetArray(i) 339 out += f"\n\x1b[1m\x1b[4mArray name : {varr.GetName()}\x1b[0m" 340 out += "\nindex".ljust(15) + f": {i}" 341 t = varr.GetDataType() 342 if t in vtki.array_types: 343 out += "\ntype".ljust(15) 344 out += f": {vtki.array_types[t]}" 345 shape = (varr.GetNumberOfTuples(), varr.GetNumberOfComponents()) 346 out += "\nshape".ljust(15) + f": {shape}" 347 out += "\nrange".ljust(15) + f": {np.array(varr.GetRange())}" 348 out += "\nmax id".ljust(15) + f": {varr.GetMaxId()}" 349 out += "\nlook up table".ljust(15) + f": {bool(varr.GetLookupTable())}" 350 out += "\nin-memory size".ljust(15) + f": {varr.GetActualMemorySize()} KB" 351 else: 352 out += " is empty.\x1b[0m" 353 return out 354 355 if self.association == 0: 356 out = _get_str(self.obj.dataset.GetPointData(), "Point Data") 357 elif self.association == 1: 358 out = _get_str(self.obj.dataset.GetCellData(), "Cell Data") 359 elif self.association == 2: 360 pd = self.obj.dataset.GetFieldData() 361 if pd.GetNumberOfArrays(): 362 out = "\x1b[2m\x1b[1m\x1b[7mMeta Data" 363 if self.obj.name: 364 out += f" in {self.obj.name}" 365 out += f" contains {pd.GetNumberOfArrays()} entries\x1b[0m" 366 for i in range(pd.GetNumberOfArrays()): 367 varr = pd.GetAbstractArray(i) 368 out += f"\n\x1b[1m\x1b[4mEntry name : {varr.GetName()}\x1b[0m" 369 out += "\nindex".ljust(15) + f": {i}" 370 shape = (varr.GetNumberOfTuples(), varr.GetNumberOfComponents()) 371 out += "\nshape".ljust(15) + f": {shape}" 372 373 return out
157 def keys(self) -> List[str]: 158 """Return the list of available data array names""" 159 if self.association == 0: 160 data = self.obj.dataset.GetPointData() 161 elif self.association == 1: 162 data = self.obj.dataset.GetCellData() 163 elif self.association == 2: 164 data = self.obj.dataset.GetFieldData() 165 arrnames = [] 166 for i in range(data.GetNumberOfArrays()): 167 name = "" 168 if self.association == 2: 169 name = data.GetAbstractArray(i).GetName() 170 else: 171 iarr = data.GetArray(i) 172 if iarr: 173 name = iarr.GetName() 174 if name: 175 arrnames.append(name) 176 return arrnames
Return the list of available data array names
178 def items(self) -> List: 179 """Return the list of available data array `(names, values)`.""" 180 if self.association == 0: 181 data = self.obj.dataset.GetPointData() 182 elif self.association == 1: 183 data = self.obj.dataset.GetCellData() 184 elif self.association == 2: 185 data = self.obj.dataset.GetFieldData() 186 arrnames = [] 187 for i in range(data.GetNumberOfArrays()): 188 if self.association == 2: 189 name = data.GetAbstractArray(i).GetName() 190 else: 191 name = data.GetArray(i).GetName() 192 if name: 193 arrnames.append((name, self[name])) 194 return arrnames
Return the list of available data array (names, values)
.
196 def todict(self) -> dict: 197 """Return a dictionary of the available data arrays.""" 198 return dict(self.items())
Return a dictionary of the available data arrays.
200 def rename(self, oldname: str, newname: str) -> None: 201 """Rename an array""" 202 if self.association == 0: 203 varr = self.obj.dataset.GetPointData().GetArray(oldname) 204 elif self.association == 1: 205 varr = self.obj.dataset.GetCellData().GetArray(oldname) 206 elif self.association == 2: 207 varr = self.obj.dataset.GetFieldData().GetAbstractArray(oldname) 208 if varr: 209 varr.SetName(newname) 210 else: 211 vedo.logger.warning( 212 f"Cannot rename non existing array {oldname} to {newname}" 213 )
Rename an array
215 def remove(self, key: Union[int, str]) -> None: 216 """Remove a data array by name or number""" 217 if self.association == 0: 218 self.obj.dataset.GetPointData().RemoveArray(key) 219 elif self.association == 1: 220 self.obj.dataset.GetCellData().RemoveArray(key) 221 elif self.association == 2: 222 self.obj.dataset.GetFieldData().RemoveArray(key)
Remove a data array by name or number
224 def clear(self) -> None: 225 """Remove all data associated to this object""" 226 if self.association == 0: 227 data = self.obj.dataset.GetPointData() 228 elif self.association == 1: 229 data = self.obj.dataset.GetCellData() 230 elif self.association == 2: 231 data = self.obj.dataset.GetFieldData() 232 for i in range(data.GetNumberOfArrays()): 233 if self.association == 2: 234 name = data.GetAbstractArray(i).GetName() 235 else: 236 name = data.GetArray(i).GetName() 237 data.RemoveArray(name)
Remove all data associated to this object
239 def select(self, key: Union[int, str]) -> Any: 240 """Select one specific array by its name to make it the `active` one.""" 241 # Default (ColorModeToDefault): unsigned char scalars are treated as colors, 242 # and NOT mapped through the lookup table, while everything else is. 243 # ColorModeToDirectScalar extends ColorModeToDefault such that all integer 244 # types are treated as colors with values in the range 0-255 245 # and floating types are treated as colors with values in the range 0.0-1.0. 246 # Setting ColorModeToMapScalars means that all scalar data will be mapped 247 # through the lookup table. 248 # (Note that for multi-component scalars, the particular component 249 # to use for mapping can be specified using the SelectColorArray() method.) 250 if self.association == 0: 251 data = self.obj.dataset.GetPointData() 252 self.obj.mapper.SetScalarModeToUsePointData() 253 else: 254 data = self.obj.dataset.GetCellData() 255 self.obj.mapper.SetScalarModeToUseCellData() 256 257 if isinstance(key, int): 258 key = data.GetArrayName(key) 259 260 arr = data.GetArray(key) 261 if not arr: 262 return self.obj 263 264 nc = arr.GetNumberOfComponents() 265 # print("GetNumberOfComponents", nc) 266 if nc == 1: 267 data.SetActiveScalars(key) 268 elif nc == 2: 269 data.SetTCoords(arr) 270 elif nc in (3, 4): 271 if "rgb" in key.lower(): # type: ignore 272 data.SetActiveScalars(key) 273 try: 274 # could be a volume mapper 275 self.obj.mapper.SetColorModeToDirectScalars() 276 data.SetActiveVectors(None) # need this to fix bug in #1066 277 # print("SetColorModeToDirectScalars for", key) 278 except AttributeError: 279 pass 280 else: 281 data.SetActiveVectors(key) 282 elif nc == 9: 283 data.SetActiveTensors(key) 284 else: 285 vedo.logger.error(f"Cannot select array {key} with {nc} components") 286 return self.obj 287 288 try: 289 # could be a volume mapper 290 self.obj.mapper.SetArrayName(key) 291 self.obj.mapper.ScalarVisibilityOn() 292 except AttributeError: 293 pass 294 295 return self.obj
Select one specific array by its name to make it the active
one.
297 def select_texture_coords(self, key: Union[int,str]) -> Any: 298 """Select one specific array to be used as texture coordinates.""" 299 if self.association == 0: 300 data = self.obj.dataset.GetPointData() 301 else: 302 vedo.logger.warning("texture coordinates are only available for point data") 303 return 304 305 if isinstance(key, int): 306 key = data.GetArrayName(key) 307 data.SetTCoords(data.GetArray(key)) 308 return self.obj
Select one specific array to be used as texture coordinates.
310 def select_normals(self, key: Union[int,str]) -> Any: 311 """Select one specific normal array by its name to make it the "active" one.""" 312 if self.association == 0: 313 data = self.obj.dataset.GetPointData() 314 self.obj.mapper.SetScalarModeToUsePointData() 315 else: 316 data = self.obj.dataset.GetCellData() 317 self.obj.mapper.SetScalarModeToUseCellData() 318 319 if isinstance(key, int): 320 key = data.GetArrayName(key) 321 data.SetActiveNormals(key) 322 return self.obj
Select one specific normal array by its name to make it the "active" one.
377class CommonAlgorithms: 378 """Common algorithms.""" 379 380 @property 381 def pointdata(self): 382 """ 383 Create and/or return a `numpy.array` associated to points (vertices). 384 A data array can be indexed either as a string or by an integer number. 385 E.g.: `myobj.pointdata["arrayname"]` 386 387 Usage: 388 389 `myobj.pointdata.keys()` to return the available data array names 390 391 `myobj.pointdata.select(name)` to make this array the active one 392 393 `myobj.pointdata.remove(name)` to remove this array 394 """ 395 return DataArrayHelper(self, 0) 396 397 @property 398 def celldata(self): 399 """ 400 Create and/or return a `numpy.array` associated to cells (faces). 401 A data array can be indexed either as a string or by an integer number. 402 E.g.: `myobj.celldata["arrayname"]` 403 404 Usage: 405 406 `myobj.celldata.keys()` to return the available data array names 407 408 `myobj.celldata.select(name)` to make this array the active one 409 410 `myobj.celldata.remove(name)` to remove this array 411 """ 412 return DataArrayHelper(self, 1) 413 414 @property 415 def metadata(self): 416 """ 417 Create and/or return a `numpy.array` associated to neither cells nor faces. 418 A data array can be indexed either as a string or by an integer number. 419 E.g.: `myobj.metadata["arrayname"]` 420 421 Usage: 422 423 `myobj.metadata.keys()` to return the available data array names 424 425 `myobj.metadata.select(name)` to make this array the active one 426 427 `myobj.metadata.remove(name)` to remove this array 428 """ 429 return DataArrayHelper(self, 2) 430 431 def memory_address(self) -> int: 432 """ 433 Return a unique memory address integer which may serve as the ID of the 434 object, or passed to c++ code. 435 """ 436 # https://www.linkedin.com/pulse/speedup-your-code-accessing-python-vtk-objects-from-c-pletzer/ 437 # https://github.com/tfmoraes/polydata_connectivity 438 return int(self.dataset.GetAddressAsString("")[5:], 16) 439 440 def memory_size(self) -> int: 441 """Return the size in bytes of the object in memory.""" 442 return self.dataset.GetActualMemorySize() 443 444 def modified(self) -> Self: 445 """Use in conjunction with `tonumpy()` to update any modifications to the image array.""" 446 self.dataset.GetPointData().Modified() 447 scals = self.dataset.GetPointData().GetScalars() 448 if scals: 449 scals.Modified() 450 return self 451 452 def box(self, scale=1, padding=0) -> "vedo.Mesh": 453 """ 454 Return the bounding box as a new `Mesh` object. 455 456 Arguments: 457 scale : (float) 458 box size can be scaled by a factor 459 padding : (float, list) 460 a constant padding can be added (can be a list `[padx,pady,padz]`) 461 """ 462 b = self.bounds() 463 if not utils.is_sequence(padding): 464 padding = [padding, padding, padding] 465 length, width, height = b[1] - b[0], b[3] - b[2], b[5] - b[4] 466 tol = (length + width + height) / 30000 # useful for boxing text 467 pos = [(b[0] + b[1]) / 2, (b[3] + b[2]) / 2, (b[5] + b[4]) / 2 - tol] 468 bx = vedo.shapes.Box( 469 pos, 470 length * scale + padding[0], 471 width * scale + padding[1], 472 height * scale + padding[2], 473 c="gray", 474 ) 475 try: 476 pr = vtki.vtkProperty() 477 pr.DeepCopy(self.properties) 478 bx.actor.SetProperty(pr) 479 bx.properties = pr 480 except (AttributeError, TypeError): 481 pass 482 bx.flat().lighting("off").wireframe(True) 483 return bx 484 485 def update_dataset(self, dataset, **kwargs) -> Self: 486 """Update the dataset of the object with the provided VTK dataset.""" 487 self._update(dataset, **kwargs) 488 return self 489 490 def bounds(self) -> np.ndarray: 491 """ 492 Get the object bounds. 493 Returns a list in format `[xmin,xmax, ymin,ymax, zmin,zmax]`. 494 """ 495 try: # this is very slow for large meshes 496 pts = self.vertices 497 xmin, ymin, zmin = np.min(pts, axis=0) 498 xmax, ymax, zmax = np.max(pts, axis=0) 499 return np.array([xmin, xmax, ymin, ymax, zmin, zmax]) 500 except (AttributeError, ValueError): 501 return np.array(self.dataset.GetBounds()) 502 503 def xbounds(self, i=None) -> np.ndarray: 504 """Get the bounds `[xmin,xmax]`. Can specify upper or lower with i (0,1).""" 505 b = self.bounds() 506 if i is not None: 507 return b[i] 508 return np.array([b[0], b[1]]) 509 510 def ybounds(self, i=None) -> np.ndarray: 511 """Get the bounds `[ymin,ymax]`. Can specify upper or lower with i (0,1).""" 512 b = self.bounds() 513 if i == 0: 514 return b[2] 515 if i == 1: 516 return b[3] 517 return np.array([b[2], b[3]]) 518 519 def zbounds(self, i=None) -> np.ndarray: 520 """Get the bounds `[zmin,zmax]`. Can specify upper or lower with i (0,1).""" 521 b = self.bounds() 522 if i == 0: 523 return b[4] 524 if i == 1: 525 return b[5] 526 return np.array([b[4], b[5]]) 527 528 def diagonal_size(self) -> float: 529 """Get the length of the diagonal of the bounding box.""" 530 b = self.bounds() 531 return np.sqrt((b[1] - b[0])**2 + (b[3] - b[2])**2 + (b[5] - b[4])**2) 532 533 def average_size(self) -> float: 534 """ 535 Calculate and return the average size of the object. 536 This is the mean of the vertex distances from the center of mass. 537 """ 538 coords = self.vertices 539 cm = np.mean(coords, axis=0) 540 if coords.shape[0] == 0: 541 return 0.0 542 cc = coords - cm 543 return np.mean(np.linalg.norm(cc, axis=1)) 544 545 def center_of_mass(self) -> np.ndarray: 546 """Get the center of mass of the object.""" 547 if isinstance(self, (vedo.RectilinearGrid, vedo.Volume)): 548 return np.array(self.dataset.GetCenter()) 549 cmf = vtki.new("CenterOfMass") 550 cmf.SetInputData(self.dataset) 551 cmf.Update() 552 c = cmf.GetCenter() 553 return np.array(c) 554 555 def copy_data_from(self, obj: Any) -> Self: 556 """Copy all data (point and cell data) from this input object""" 557 self.dataset.GetPointData().PassData(obj.dataset.GetPointData()) 558 self.dataset.GetCellData().PassData(obj.dataset.GetCellData()) 559 self.pipeline = utils.OperationNode( 560 "copy_data_from", 561 parents=[self, obj], 562 comment=f"{obj.__class__.__name__}", 563 shape="note", 564 c="#ccc5b9", 565 ) 566 return self 567 568 def inputdata(self): 569 """Obsolete, use `.dataset` instead.""" 570 colors.printc("WARNING: 'inputdata()' is obsolete, use '.dataset' instead.", c="y") 571 return self.dataset 572 573 @property 574 def npoints(self): 575 """Retrieve the number of points (or vertices).""" 576 return self.dataset.GetNumberOfPoints() 577 578 @property 579 def nvertices(self): 580 """Retrieve the number of vertices (or points).""" 581 return self.dataset.GetNumberOfPoints() 582 583 @property 584 def ncells(self): 585 """Retrieve the number of cells.""" 586 return self.dataset.GetNumberOfCells() 587 588 def points(self, pts=None): 589 """Obsolete, use `self.vertices` or `self.coordinates` instead.""" 590 if pts is None: ### getter 591 592 if warnings["points_getter"]: 593 colors.printc(warnings["points_getter"], c="y") 594 warnings["points_getter"] = "" 595 return self.vertices 596 597 else: ### setter 598 599 if warnings["points_setter"]: 600 colors.printc(warnings["points_setter"], c="y") 601 warnings["points_setter"] = "" 602 603 pts = np.asarray(pts, dtype=np.float32) 604 605 if pts.ndim == 1: 606 ### getter by point index ################### 607 indices = pts.astype(int) 608 vpts = self.dataset.GetPoints() 609 arr = utils.vtk2numpy(vpts.GetData()) 610 return arr[indices] ########### 611 612 ### setter #################################### 613 if pts.shape[1] == 2: 614 pts = np.c_[pts, np.zeros(pts.shape[0], dtype=np.float32)] 615 arr = utils.numpy2vtk(pts, dtype=np.float32) 616 617 vpts = self.dataset.GetPoints() 618 vpts.SetData(arr) 619 vpts.Modified() 620 # reset mesh to identity matrix position/rotation: 621 self.point_locator = None 622 self.cell_locator = None 623 self.line_locator = None 624 self.transform = LinearTransform() 625 return self 626 627 @property 628 def cell_centers(self): 629 """ 630 Get the coordinates of the cell centers. 631 632 Examples: 633 - [delaunay2d.py](https://github.com/marcomusy/vedo/tree/master/examples/basic/delaunay2d.py) 634 635 See also: `CellCenters()`. 636 """ 637 vcen = vtki.new("CellCenters") 638 vcen.CopyArraysOff() 639 vcen.SetInputData(self.dataset) 640 vcen.Update() 641 return utils.vtk2numpy(vcen.GetOutput().GetPoints().GetData()) 642 643 @property 644 def lines(self): 645 """ 646 Get lines connectivity ids as a python array 647 formatted as `[[id0,id1], [id3,id4], ...]` 648 649 See also: `lines_as_flat_array()`. 650 """ 651 # Get cell connettivity ids as a 1D array. The vtk format is: 652 # [nids1, id0 ... idn, niids2, id0 ... idm, etc]. 653 try: 654 arr1d = utils.vtk2numpy(self.dataset.GetLines().GetData()) 655 except AttributeError: 656 return np.array([]) 657 i = 0 658 conn = [] 659 n = len(arr1d) 660 for _ in range(n): 661 cell = [arr1d[i + k + 1] for k in range(arr1d[i])] 662 conn.append(cell) 663 i += arr1d[i] + 1 664 if i >= n: 665 break 666 667 return conn # cannot always make a numpy array of it! 668 669 @property 670 def lines_as_flat_array(self): 671 """ 672 Get lines connectivity ids as a 1D numpy array. 673 Format is e.g. [2, 10,20, 3, 10,11,12, 2, 70,80, ...] 674 675 See also: `lines()`. 676 """ 677 try: 678 return utils.vtk2numpy(self.dataset.GetLines().GetData()) 679 except AttributeError: 680 return np.array([]) 681 682 def mark_boundaries(self) -> Self: 683 """ 684 Mark cells and vertices if they lie on a boundary. 685 A new array called `BoundaryCells` is added to the object. 686 """ 687 mb = vtki.new("MarkBoundaryFilter") 688 mb.SetInputData(self.dataset) 689 mb.Update() 690 self.dataset.DeepCopy(mb.GetOutput()) 691 self.pipeline = utils.OperationNode("mark_boundaries", parents=[self]) 692 return self 693 694 def find_cells_in_bounds(self, xbounds=(), ybounds=(), zbounds=()) -> np.ndarray: 695 """ 696 Find cells that are within the specified bounds. 697 """ 698 try: 699 xbounds = list(xbounds.bounds()) 700 except AttributeError: 701 pass 702 703 if len(xbounds) == 6: 704 bnds = xbounds 705 else: 706 bnds = list(self.bounds()) 707 if len(xbounds) == 2: 708 bnds[0] = xbounds[0] 709 bnds[1] = xbounds[1] 710 if len(ybounds) == 2: 711 bnds[2] = ybounds[0] 712 bnds[3] = ybounds[1] 713 if len(zbounds) == 2: 714 bnds[4] = zbounds[0] 715 bnds[5] = zbounds[1] 716 717 cell_ids = vtki.vtkIdList() 718 if not self.cell_locator: 719 self.cell_locator = vtki.new("CellTreeLocator") 720 self.cell_locator.SetDataSet(self.dataset) 721 self.cell_locator.BuildLocator() 722 self.cell_locator.FindCellsWithinBounds(bnds, cell_ids) 723 cids = [] 724 for i in range(cell_ids.GetNumberOfIds()): 725 cid = cell_ids.GetId(i) 726 cids.append(cid) 727 return np.array(cids) 728 729 def find_cells_along_line(self, p0, p1, tol=0.001) -> np.ndarray: 730 """ 731 Find cells that are intersected by a line segment. 732 """ 733 cell_ids = vtki.vtkIdList() 734 if not self.cell_locator: 735 self.cell_locator = vtki.new("CellTreeLocator") 736 self.cell_locator.SetDataSet(self.dataset) 737 self.cell_locator.BuildLocator() 738 self.cell_locator.FindCellsAlongLine(p0, p1, tol, cell_ids) 739 cids = [] 740 for i in range(cell_ids.GetNumberOfIds()): 741 cid = cell_ids.GetId(i) 742 cids.append(cid) 743 return np.array(cids) 744 745 def find_cells_along_plane(self, origin, normal, tol=0.001) -> np.ndarray: 746 """ 747 Find cells that are intersected by a plane. 748 """ 749 cell_ids = vtki.vtkIdList() 750 if not self.cell_locator: 751 self.cell_locator = vtki.new("CellTreeLocator") 752 self.cell_locator.SetDataSet(self.dataset) 753 self.cell_locator.BuildLocator() 754 self.cell_locator.FindCellsAlongPlane(origin, normal, tol, cell_ids) 755 cids = [] 756 for i in range(cell_ids.GetNumberOfIds()): 757 cid = cell_ids.GetId(i) 758 cids.append(cid) 759 return np.array(cids) 760 761 def keep_cell_types(self, types=()): 762 """ 763 Extract cells of a specific type. 764 765 Check the VTK cell types here: 766 https://vtk.org/doc/nightly/html/vtkCellType_8h.html 767 """ 768 fe = vtki.new("ExtractCellsByType") 769 fe.SetInputData(self.dataset) 770 for t in types: 771 try: 772 if utils.is_integer(t): 773 it = t 774 else: 775 it = vtki.cell_types[t.upper()] 776 except KeyError: 777 vedo.logger.error(f"Cell type '{t}' not recognized") 778 continue 779 fe.AddCellType(it) 780 fe.Update() 781 self._update(fe.GetOutput()) 782 return self 783 784 def map_cells_to_points(self, arrays=(), move=False) -> Self: 785 """ 786 Interpolate cell data (i.e., data specified per cell or face) 787 into point data (i.e., data specified at each vertex). 788 The method of transformation is based on averaging the data values 789 of all cells using a particular point. 790 791 A custom list of arrays to be mapped can be passed in input. 792 793 Set `move=True` to delete the original `celldata` array. 794 """ 795 c2p = vtki.new("CellDataToPointData") 796 c2p.SetInputData(self.dataset) 797 if not move: 798 c2p.PassCellDataOn() 799 if arrays: 800 c2p.ClearCellDataArrays() 801 c2p.ProcessAllArraysOff() 802 for arr in arrays: 803 c2p.AddCellDataArray(arr) 804 else: 805 c2p.ProcessAllArraysOn() 806 c2p.Update() 807 self._update(c2p.GetOutput(), reset_locators=False) 808 self.mapper.SetScalarModeToUsePointData() 809 self.pipeline = utils.OperationNode("map_cells_to_points", parents=[self]) 810 return self 811 812 @property 813 def vertices(self): 814 """Return the vertices (points) coordinates.""" 815 try: 816 # for polydata and unstructured grid 817 varr = self.dataset.GetPoints().GetData() 818 except (AttributeError, TypeError): 819 try: 820 # for RectilinearGrid, StructuredGrid 821 vpts = vtki.vtkPoints() 822 self.dataset.GetPoints(vpts) 823 varr = vpts.GetData() 824 except (AttributeError, TypeError): 825 try: 826 # for ImageData 827 v2p = vtki.new("ImageToPoints") 828 v2p.SetInputData(self.dataset) 829 v2p.Update() 830 varr = v2p.GetOutput().GetPoints().GetData() 831 except AttributeError: 832 return np.array([]) 833 834 return utils.vtk2numpy(varr) 835 836 # setter 837 @vertices.setter 838 def vertices(self, pts): 839 """Set vertices (points) coordinates.""" 840 pts = utils.make3d(pts) 841 arr = utils.numpy2vtk(pts, dtype=np.float32) 842 try: 843 vpts = self.dataset.GetPoints() 844 vpts.SetData(arr) 845 vpts.Modified() 846 except (AttributeError, TypeError): 847 vedo.logger.error(f"Cannot set vertices for {type(self)}") 848 return self 849 # reset mesh to identity matrix position/rotation: 850 self.point_locator = None 851 self.cell_locator = None 852 self.line_locator = None 853 self.transform = LinearTransform() 854 855 @property 856 def coordinates(self): 857 """Return the vertices (points) coordinates. Same as `vertices`.""" 858 return self.vertices 859 860 @coordinates.setter 861 def coordinates(self, pts): 862 """Set vertices (points) coordinates. Same as `vertices`.""" 863 self.vertices = pts 864 865 @property 866 def cells_as_flat_array(self): 867 """ 868 Get cell connectivity ids as a 1D numpy array. 869 Format is e.g. [3, 10,20,30 4, 10,11,12,13 ...] 870 """ 871 try: 872 # valid for unstructured grid 873 arr1d = utils.vtk2numpy(self.dataset.GetCells().GetData()) 874 except AttributeError: 875 # valid for polydata 876 arr1d = utils.vtk2numpy(self.dataset.GetPolys().GetData()) 877 return arr1d 878 879 @property 880 def cells(self): 881 """ 882 Get the cells connectivity ids as a numpy array. 883 884 The output format is: `[[id0 ... idn], [id0 ... idm], etc]`. 885 """ 886 try: 887 # valid for unstructured grid 888 arr1d = utils.vtk2numpy(self.dataset.GetCells().GetData()) 889 except AttributeError: 890 try: 891 # valid for polydata 892 arr1d = utils.vtk2numpy(self.dataset.GetPolys().GetData()) 893 except AttributeError: 894 vedo.logger.warning(f"Cannot get cells for {type(self)}") 895 return np.array([]) 896 897 # Get cell connettivity ids as a 1D array. vtk format is: 898 # [nids1, id0 ... idn, niids2, id0 ... idm, etc]. 899 i = 0 900 conn = [] 901 n = len(arr1d) 902 if n: 903 while True: 904 cell = [arr1d[i + k] for k in range(1, arr1d[i] + 1)] 905 conn.append(cell) 906 i += arr1d[i] + 1 907 if i >= n: 908 break 909 return conn 910 911 def map_points_to_cells(self, arrays=(), move=False) -> Self: 912 """ 913 Interpolate point data (i.e., data specified per point or vertex) 914 into cell data (i.e., data specified per cell). 915 The method of transformation is based on averaging the data values 916 of all points defining a particular cell. 917 918 A custom list of arrays to be mapped can be passed in input. 919 920 Set `move=True` to delete the original `pointdata` array. 921 922 Examples: 923 - [mesh_map2cell.py](https://github.com/marcomusy/vedo/tree/master/examples/basic/mesh_map2cell.py) 924 """ 925 p2c = vtki.new("PointDataToCellData") 926 p2c.SetInputData(self.dataset) 927 if not move: 928 p2c.PassPointDataOn() 929 if arrays: 930 p2c.ClearPointDataArrays() 931 p2c.ProcessAllArraysOff() 932 for arr in arrays: 933 p2c.AddPointDataArray(arr) 934 else: 935 p2c.ProcessAllArraysOn() 936 p2c.Update() 937 self._update(p2c.GetOutput(), reset_locators=False) 938 self.mapper.SetScalarModeToUseCellData() 939 self.pipeline = utils.OperationNode("map_points_to_cells", parents=[self]) 940 return self 941 942 def resample_data_from(self, source, tol=None, categorical=False) -> Self: 943 """ 944 Resample point and cell data from another dataset. 945 The output has the same structure but its point data have 946 the resampled values from target. 947 948 Use `tol` to set the tolerance used to compute whether 949 a point in the source is in a cell of the current object. 950 Points without resampled values, and their cells, are marked as blank. 951 If the data is categorical, then the resulting data will be determined 952 by a nearest neighbor interpolation scheme. 953 954 Example: 955 ```python 956 from vedo import * 957 m1 = Mesh(dataurl+'bunny.obj')#.add_gaussian_noise(0.1) 958 pts = m1.vertices 959 ces = m1.cell_centers 960 m1.pointdata["xvalues"] = np.power(pts[:,0], 3) 961 m1.celldata["yvalues"] = np.power(ces[:,1], 3) 962 m2 = Mesh(dataurl+'bunny.obj') 963 m2.resample_data_from(m1) 964 # print(m2.pointdata["xvalues"]) 965 show(m1, m2 , N=2, axes=1) 966 ``` 967 """ 968 rs = vtki.new("ResampleWithDataSet") 969 rs.SetInputData(self.dataset) 970 rs.SetSourceData(source.dataset) 971 972 rs.SetPassPointArrays(True) 973 rs.SetPassCellArrays(True) 974 rs.SetPassFieldArrays(True) 975 rs.SetCategoricalData(categorical) 976 977 rs.SetComputeTolerance(True) 978 if tol: 979 rs.SetComputeTolerance(False) 980 rs.SetTolerance(tol) 981 rs.Update() 982 self._update(rs.GetOutput(), reset_locators=False) 983 self.pipeline = utils.OperationNode( 984 "resample_data_from", 985 comment=f"{source.__class__.__name__}", 986 parents=[self, source], 987 ) 988 return self 989 990 def interpolate_data_from( 991 self, 992 source, 993 radius=None, 994 n=None, 995 kernel="shepard", 996 exclude=("Normals",), 997 on="points", 998 null_strategy=1, 999 null_value=0, 1000 ) -> Self: 1001 """ 1002 Interpolate over source to port its data onto the current object using various kernels. 1003 1004 If n (number of closest points to use) is set then radius value is ignored. 1005 1006 Check out also: 1007 `probe()` which in many cases can be faster. 1008 1009 Arguments: 1010 kernel : (str) 1011 available kernels are [shepard, gaussian, linear] 1012 null_strategy : (int) 1013 specify a strategy to use when encountering a "null" point 1014 during the interpolation process. Null points occur when the local neighborhood 1015 (of nearby points to interpolate from) is empty. 1016 1017 - Case 0: an output array is created that marks points 1018 as being valid (=1) or null (invalid =0), and the null_value is set as well 1019 - Case 1: the output data value(s) are set to the provided null_value 1020 - Case 2: simply use the closest point to perform the interpolation. 1021 null_value : (float) 1022 see above. 1023 1024 Examples: 1025 - [interpolate_scalar1.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/interpolate_scalar1.py) 1026 - [interpolate_scalar3.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/interpolate_scalar3.py) 1027 - [interpolate_scalar4.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/interpolate_scalar4.py) 1028 - [image_probe.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/image_probe.py) 1029 1030 ![](https://vedo.embl.es/images/advanced/interpolateMeshArray.png) 1031 """ 1032 if radius is None and not n: 1033 vedo.logger.error("in interpolate_data_from(): please set either radius or n") 1034 raise RuntimeError 1035 1036 if on == "points": 1037 points = source.dataset 1038 elif on == "cells": 1039 c2p = vtki.new("CellDataToPointData") 1040 c2p.SetInputData(source.dataset) 1041 c2p.Update() 1042 points = c2p.GetOutput() 1043 else: 1044 vedo.logger.error("in interpolate_data_from(), on must be on points or cells") 1045 raise RuntimeError() 1046 1047 locator = vtki.new("PointLocator") 1048 locator.SetDataSet(points) 1049 locator.BuildLocator() 1050 1051 if kernel.lower() == "shepard": 1052 kern = vtki.new("ShepardKernel") 1053 kern.SetPowerParameter(2) 1054 elif kernel.lower() == "gaussian": 1055 kern = vtki.new("GaussianKernel") 1056 kern.SetSharpness(2) 1057 elif kernel.lower() == "linear": 1058 kern = vtki.new("LinearKernel") 1059 else: 1060 vedo.logger.error("available kernels are: [shepard, gaussian, linear]") 1061 raise RuntimeError() 1062 1063 if n: 1064 kern.SetNumberOfPoints(n) 1065 kern.SetKernelFootprintToNClosest() 1066 else: 1067 kern.SetRadius(radius) 1068 kern.SetKernelFootprintToRadius() 1069 1070 interpolator = vtki.new("PointInterpolator") 1071 interpolator.SetInputData(self.dataset) 1072 interpolator.SetSourceData(points) 1073 interpolator.SetKernel(kern) 1074 interpolator.SetLocator(locator) 1075 interpolator.PassFieldArraysOn() 1076 interpolator.SetNullPointsStrategy(null_strategy) 1077 interpolator.SetNullValue(null_value) 1078 interpolator.SetValidPointsMaskArrayName("ValidPointMask") 1079 for ex in exclude: 1080 interpolator.AddExcludedArray(ex) 1081 1082 # remove arrays that are already present in the source 1083 # this is because the interpolator will ignore them otherwise 1084 for i in range(self.dataset.GetPointData().GetNumberOfArrays()): 1085 name = self.dataset.GetPointData().GetArrayName(i) 1086 if name not in exclude: 1087 self.dataset.GetPointData().RemoveArray(name) 1088 1089 interpolator.Update() 1090 1091 if on == "cells": 1092 p2c = vtki.new("PointDataToCellData") 1093 p2c.SetInputData(interpolator.GetOutput()) 1094 p2c.Update() 1095 cpoly = p2c.GetOutput() 1096 else: 1097 cpoly = interpolator.GetOutput() 1098 1099 self._update(cpoly, reset_locators=False) 1100 1101 self.pipeline = utils.OperationNode("interpolate_data_from", parents=[self, source]) 1102 return self 1103 1104 def add_ids(self) -> Self: 1105 """ 1106 Generate point and cell ids arrays. 1107 1108 Two new arrays are added to the mesh: `PointID` and `CellID`. 1109 """ 1110 ids = vtki.new("IdFilter") 1111 ids.SetInputData(self.dataset) 1112 ids.PointIdsOn() 1113 ids.CellIdsOn() 1114 ids.FieldDataOff() 1115 ids.SetPointIdsArrayName("PointID") 1116 ids.SetCellIdsArrayName("CellID") 1117 ids.Update() 1118 self._update(ids.GetOutput(), reset_locators=False) 1119 self.pipeline = utils.OperationNode("add_ids", parents=[self]) 1120 return self 1121 1122 def gradient(self, input_array=None, on="points", fast=False) -> np.ndarray: 1123 """ 1124 Compute and return the gradiend of the active scalar field as a numpy array. 1125 1126 Arguments: 1127 input_array : (str) 1128 array of the scalars to compute the gradient, 1129 if None the current active array is selected 1130 on : (str) 1131 compute either on 'points' or 'cells' data 1132 fast : (bool) 1133 if True, will use a less accurate algorithm 1134 that performs fewer derivative calculations (and is therefore faster). 1135 1136 Examples: 1137 - [isolines.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/isolines.py) 1138 1139 ![](https://user-images.githubusercontent.com/32848391/72433087-f00a8780-3798-11ea-9778-991f0abeca70.png) 1140 """ 1141 gra = vtki.new("GradientFilter") 1142 if on.startswith("p"): 1143 varr = self.dataset.GetPointData() 1144 tp = vtki.vtkDataObject.FIELD_ASSOCIATION_POINTS 1145 elif on.startswith("c"): 1146 varr = self.dataset.GetCellData() 1147 tp = vtki.vtkDataObject.FIELD_ASSOCIATION_CELLS 1148 else: 1149 vedo.logger.error(f"in gradient: unknown option {on}") 1150 raise RuntimeError 1151 1152 if input_array is None: 1153 if varr.GetScalars(): 1154 input_array = varr.GetScalars().GetName() 1155 else: 1156 vedo.logger.error(f"in gradient: no scalars found for {on}") 1157 raise RuntimeError 1158 1159 gra.SetInputData(self.dataset) 1160 gra.SetInputScalars(tp, input_array) 1161 gra.SetResultArrayName("Gradient") 1162 gra.SetFasterApproximation(fast) 1163 gra.ComputeDivergenceOff() 1164 gra.ComputeVorticityOff() 1165 gra.ComputeGradientOn() 1166 gra.Update() 1167 if on.startswith("p"): 1168 gvecs = utils.vtk2numpy(gra.GetOutput().GetPointData().GetArray("Gradient")) 1169 else: 1170 gvecs = utils.vtk2numpy(gra.GetOutput().GetCellData().GetArray("Gradient")) 1171 return gvecs 1172 1173 def divergence(self, array_name=None, on="points", fast=False) -> np.ndarray: 1174 """ 1175 Compute and return the divergence of a vector field as a numpy array. 1176 1177 Arguments: 1178 array_name : (str) 1179 name of the array of vectors to compute the divergence, 1180 if None the current active array is selected 1181 on : (str) 1182 compute either on 'points' or 'cells' data 1183 fast : (bool) 1184 if True, will use a less accurate algorithm 1185 that performs fewer derivative calculations (and is therefore faster). 1186 """ 1187 div = vtki.new("GradientFilter") 1188 if on.startswith("p"): 1189 varr = self.dataset.GetPointData() 1190 tp = vtki.vtkDataObject.FIELD_ASSOCIATION_POINTS 1191 elif on.startswith("c"): 1192 varr = self.dataset.GetCellData() 1193 tp = vtki.vtkDataObject.FIELD_ASSOCIATION_CELLS 1194 else: 1195 vedo.logger.error(f"in divergence(): unknown option {on}") 1196 raise RuntimeError 1197 1198 if array_name is None: 1199 if varr.GetVectors(): 1200 array_name = varr.GetVectors().GetName() 1201 else: 1202 vedo.logger.error(f"in divergence(): no vectors found for {on}") 1203 raise RuntimeError 1204 1205 div.SetInputData(self.dataset) 1206 div.SetInputScalars(tp, array_name) 1207 div.ComputeDivergenceOn() 1208 div.ComputeGradientOff() 1209 div.ComputeVorticityOff() 1210 div.SetDivergenceArrayName("Divergence") 1211 div.SetFasterApproximation(fast) 1212 div.Update() 1213 if on.startswith("p"): 1214 dvecs = utils.vtk2numpy(div.GetOutput().GetPointData().GetArray("Divergence")) 1215 else: 1216 dvecs = utils.vtk2numpy(div.GetOutput().GetCellData().GetArray("Divergence")) 1217 return dvecs 1218 1219 def vorticity(self, array_name=None, on="points", fast=False) -> np.ndarray: 1220 """ 1221 Compute and return the vorticity of a vector field as a numpy array. 1222 1223 Arguments: 1224 array_name : (str) 1225 name of the array to compute the vorticity, 1226 if None the current active array is selected 1227 on : (str) 1228 compute either on 'points' or 'cells' data 1229 fast : (bool) 1230 if True, will use a less accurate algorithm 1231 that performs fewer derivative calculations (and is therefore faster). 1232 """ 1233 vort = vtki.new("GradientFilter") 1234 if on.startswith("p"): 1235 varr = self.dataset.GetPointData() 1236 tp = vtki.vtkDataObject.FIELD_ASSOCIATION_POINTS 1237 elif on.startswith("c"): 1238 varr = self.dataset.GetCellData() 1239 tp = vtki.vtkDataObject.FIELD_ASSOCIATION_CELLS 1240 else: 1241 vedo.logger.error(f"in vorticity(): unknown option {on}") 1242 raise RuntimeError 1243 1244 if array_name is None: 1245 if varr.GetVectors(): 1246 array_name = varr.GetVectors().GetName() 1247 else: 1248 vedo.logger.error(f"in vorticity(): no vectors found for {on}") 1249 raise RuntimeError 1250 1251 vort.SetInputData(self.dataset) 1252 vort.SetInputScalars(tp, array_name) 1253 vort.ComputeDivergenceOff() 1254 vort.ComputeGradientOff() 1255 vort.ComputeVorticityOn() 1256 vort.SetVorticityArrayName("Vorticity") 1257 vort.SetFasterApproximation(fast) 1258 vort.Update() 1259 if on.startswith("p"): 1260 vvecs = utils.vtk2numpy(vort.GetOutput().GetPointData().GetArray("Vorticity")) 1261 else: 1262 vvecs = utils.vtk2numpy(vort.GetOutput().GetCellData().GetArray("Vorticity")) 1263 return vvecs 1264 1265 def probe( 1266 self, 1267 source, 1268 categorical=False, 1269 snap=False, 1270 tol=0, 1271 ) -> Self: 1272 """ 1273 Takes a data set and probes its scalars at the specified points in space. 1274 1275 Note that a mask is also output with valid/invalid points which can be accessed 1276 with `mesh.pointdata['ValidPointMask']`. 1277 1278 Arguments: 1279 source : any dataset 1280 the data set to probe. 1281 categorical : bool 1282 control whether the source pointdata is to be treated as categorical. 1283 snap : bool 1284 snap to the cell with the closest point if no cell was found 1285 tol : float 1286 the tolerance to use when performing the probe. 1287 1288 Check out also: 1289 `interpolate_data_from()` and `tovolume()` 1290 1291 Examples: 1292 - [probe_points.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/probe_points.py) 1293 1294 ![](https://vedo.embl.es/images/volumetric/probePoints.png) 1295 """ 1296 probe_filter = vtki.new("ProbeFilter") 1297 probe_filter.SetSourceData(source.dataset) 1298 probe_filter.SetInputData(self.dataset) 1299 probe_filter.PassCellArraysOn() 1300 probe_filter.PassFieldArraysOn() 1301 probe_filter.PassPointArraysOn() 1302 probe_filter.SetCategoricalData(categorical) 1303 probe_filter.ComputeToleranceOff() 1304 if tol: 1305 probe_filter.ComputeToleranceOn() 1306 probe_filter.SetTolerance(tol) 1307 probe_filter.SetSnapToCellWithClosestPoint(snap) 1308 probe_filter.Update() 1309 self._update(probe_filter.GetOutput(), reset_locators=False) 1310 self.pipeline = utils.OperationNode("probe", parents=[self, source]) 1311 self.pointdata.rename("vtkValidPointMask", "ValidPointMask") 1312 return self 1313 1314 def compute_cell_size(self) -> Self: 1315 """ 1316 Add to this object a cell data array 1317 containing the area, volume and edge length 1318 of the cells (when applicable to the object type). 1319 1320 Array names are: `Area`, `Volume`, `Length`. 1321 """ 1322 csf = vtki.new("CellSizeFilter") 1323 csf.SetInputData(self.dataset) 1324 csf.SetComputeArea(1) 1325 csf.SetComputeVolume(1) 1326 csf.SetComputeLength(1) 1327 csf.SetComputeVertexCount(0) 1328 csf.SetAreaArrayName("Area") 1329 csf.SetVolumeArrayName("Volume") 1330 csf.SetLengthArrayName("Length") 1331 csf.Update() 1332 self._update(csf.GetOutput(), reset_locators=False) 1333 return self 1334 1335 def generate_random_data(self) -> Self: 1336 """Fill a dataset with random attributes""" 1337 gen = vtki.new("RandomAttributeGenerator") 1338 gen.SetInputData(self.dataset) 1339 gen.GenerateAllDataOn() 1340 gen.SetDataTypeToFloat() 1341 gen.GeneratePointNormalsOff() 1342 gen.GeneratePointTensorsOn() 1343 gen.GenerateCellScalarsOn() 1344 gen.Update() 1345 self._update(gen.GetOutput(), reset_locators=False) 1346 self.pipeline = utils.OperationNode("generate_random_data", parents=[self]) 1347 return self 1348 1349 def integrate_data(self) -> dict: 1350 """ 1351 Integrate point and cell data arrays while computing length, 1352 area or volume of the domain. It works for 1D, 2D or 3D cells. 1353 1354 For volumetric datasets, this filter ignores all but 3D cells. 1355 It will not compute the volume contained in a closed surface. 1356 1357 Returns a dictionary with keys: `pointdata`, `celldata`, `metadata`, 1358 which contain the integration result for the corresponding attributes. 1359 1360 Examples: 1361 ```python 1362 from vedo import * 1363 surf = Sphere(res=100) 1364 surf.pointdata['scalars'] = np.ones(surf.npoints) 1365 data = surf.integrate_data() 1366 print(data['pointdata']['scalars'], "is equal to 4pi", 4*np.pi) 1367 ``` 1368 1369 ```python 1370 from vedo import * 1371 1372 xcoords1 = np.arange(0, 2.2, 0.2) 1373 xcoords2 = sqrt(np.arange(0, 4.2, 0.2)) 1374 1375 ycoords = np.arange(0, 1.2, 0.2) 1376 1377 surf1 = Grid(s=(xcoords1, ycoords)).rotate_y(-45).lw(2) 1378 surf2 = Grid(s=(xcoords2, ycoords)).rotate_y(-45).lw(2) 1379 1380 surf1.pointdata['scalars'] = surf1.vertices[:,2] 1381 surf2.pointdata['scalars'] = surf2.vertices[:,2] 1382 1383 data1 = surf1.integrate_data() 1384 data2 = surf2.integrate_data() 1385 1386 print(data1['pointdata']['scalars'], 1387 "is equal to", 1388 data2['pointdata']['scalars'], 1389 "even if the grids are different!", 1390 "(= the volume under the surface)" 1391 ) 1392 show(surf1, surf2, N=2, axes=1).close() 1393 ``` 1394 """ 1395 vinteg = vtki.new("IntegrateAttributes") 1396 vinteg.SetInputData(self.dataset) 1397 vinteg.Update() 1398 ugrid = vedo.UnstructuredGrid(vinteg.GetOutput()) 1399 data = dict( 1400 pointdata=ugrid.pointdata.todict(), 1401 celldata=ugrid.celldata.todict(), 1402 metadata=ugrid.metadata.todict(), 1403 ) 1404 return data 1405 1406 def write(self, filename, binary=True) -> None: 1407 """Write object to file.""" 1408 out = vedo.file_io.write(self, filename, binary) 1409 out.pipeline = utils.OperationNode( 1410 "write", parents=[self], comment=filename[:15], shape="folder", c="#8a817c" 1411 ) 1412 1413 def tomesh(self, bounds=(), shrink=0) -> "vedo.Mesh": 1414 """ 1415 Extract boundary geometry from dataset (or convert data to polygonal type). 1416 1417 Two new arrays are added to the mesh: `OriginalCellIds` and `OriginalPointIds` 1418 to keep track of the original mesh elements. 1419 1420 Arguments: 1421 bounds : (list) 1422 specify a sub-region to extract 1423 shrink : (float) 1424 shrink the cells to a fraction of their original size 1425 """ 1426 geo = vtki.new("GeometryFilter") 1427 1428 if shrink: 1429 sf = vtki.new("ShrinkFilter") 1430 sf.SetInputData(self.dataset) 1431 sf.SetShrinkFactor(shrink) 1432 sf.Update() 1433 geo.SetInputData(sf.GetOutput()) 1434 else: 1435 geo.SetInputData(self.dataset) 1436 1437 geo.SetPassThroughCellIds(1) 1438 geo.SetPassThroughPointIds(1) 1439 geo.SetOriginalCellIdsName("OriginalCellIds") 1440 geo.SetOriginalPointIdsName("OriginalPointIds") 1441 geo.SetNonlinearSubdivisionLevel(1) 1442 # geo.MergingOff() # crashes on StructuredGrids 1443 if bounds: 1444 geo.SetExtent(bounds) 1445 geo.ExtentClippingOn() 1446 geo.Update() 1447 msh = vedo.mesh.Mesh(geo.GetOutput()) 1448 msh.pipeline = utils.OperationNode("tomesh", parents=[self], c="#9e2a2b") 1449 return msh 1450 1451 def signed_distance(self, dims=(20, 20, 20), bounds=None, invert=False, max_radius=None) -> "vedo.Volume": 1452 """ 1453 Compute the `Volume` object whose voxels contains the signed distance from 1454 the object. The calling object must have "Normals" defined. 1455 1456 Arguments: 1457 bounds : (list, actor) 1458 bounding box sizes 1459 dims : (list) 1460 dimensions (nr. of voxels) of the output volume. 1461 invert : (bool) 1462 flip the sign 1463 max_radius : (float) 1464 specify how far out to propagate distance calculation 1465 1466 Examples: 1467 - [distance2mesh.py](https://github.com/marcomusy/vedo/blob/master/examples/basic/distance2mesh.py) 1468 1469 ![](https://vedo.embl.es/images/basic/distance2mesh.png) 1470 """ 1471 if bounds is None: 1472 bounds = self.bounds() 1473 if max_radius is None: 1474 max_radius = self.diagonal_size() / 2 1475 dist = vtki.new("SignedDistance") 1476 dist.SetInputData(self.dataset) 1477 dist.SetRadius(max_radius) 1478 dist.SetBounds(bounds) 1479 dist.SetDimensions(dims) 1480 dist.Update() 1481 img = dist.GetOutput() 1482 if invert: 1483 mat = vtki.new("ImageMathematics") 1484 mat.SetInput1Data(img) 1485 mat.SetOperationToMultiplyByK() 1486 mat.SetConstantK(-1) 1487 mat.Update() 1488 img = mat.GetOutput() 1489 1490 vol = vedo.Volume(img) 1491 vol.name = "SignedDistanceVolume" 1492 vol.pipeline = utils.OperationNode( 1493 "signed_distance", 1494 parents=[self], 1495 comment=f"dims={tuple(vol.dimensions())}", 1496 c="#e9c46a:#0096c7", 1497 ) 1498 return vol 1499 1500 def unsigned_distance( 1501 self, dims=(25,25,25), bounds=(), max_radius=0, cap_value=0) -> "vedo.Volume": 1502 """ 1503 Compute the `Volume` object whose voxels contains the unsigned distance. 1504 """ 1505 dist = vtki.new("UnsignedDistance") 1506 dist.SetInputData(self.dataset) 1507 dist.SetDimensions(dims) 1508 1509 if len(bounds) == 6: 1510 dist.SetBounds(bounds) 1511 # elif bounds == "auto": 1512 # dist.AdjustBoundsOn() 1513 else: 1514 dist.SetBounds(self.bounds()) 1515 if not max_radius: 1516 max_radius = self.diagonal_size() / 10 1517 dist.SetRadius(max_radius) 1518 1519 if self.point_locator: 1520 dist.SetLocator(self.point_locator) 1521 1522 if cap_value is not None: 1523 dist.CappingOn() 1524 dist.SetCapValue(cap_value) 1525 dist.SetOutputScalarTypeToFloat() 1526 dist.Update() 1527 vol = vedo.Volume(dist.GetOutput()) 1528 vol.name = "UnsignedDistanceVolume" 1529 vol.pipeline = utils.OperationNode( 1530 "unsigned_distance", parents=[self], c="#e9c46a:#0096c7") 1531 return vol 1532 1533 def smooth_data(self, 1534 niter=10, relaxation_factor=0.1, strategy=0, mask=None, 1535 exclude=("Normals", "TextureCoordinates"), 1536 ) -> Self: 1537 """ 1538 Smooth point attribute data using distance weighted Laplacian kernel. 1539 1540 The effect is to blur regions of high variation and emphasize low variation regions. 1541 1542 Arguments: 1543 niter : (int) 1544 number of iterations 1545 relaxation_factor : (float) 1546 relaxation factor controlling the amount of Laplacian smoothing applied 1547 strategy : (int) 1548 strategy to use for Laplacian smoothing 1549 - 0: use all points, all point data attributes are smoothed 1550 - 1: smooth all point attribute data except those on the boundary 1551 - 2: only point data connected to a boundary point are smoothed 1552 mask : (str, np.ndarray) 1553 array to be used as a mask (ignore then the strategy keyword) 1554 exclude : (list) 1555 list of arrays to be excluded from smoothing 1556 """ 1557 try: 1558 saf = vtki.new("AttributeSmoothingFilter") 1559 except: 1560 vedo.logger.error("smooth_data() only avaialble in VTK>=9.3.0") 1561 return self 1562 saf.SetInputData(self.dataset) 1563 saf.SetRelaxationFactor(relaxation_factor) 1564 saf.SetNumberOfIterations(niter) 1565 1566 for ex in exclude: 1567 saf.AddExcludedArray(ex) 1568 1569 saf.SetWeightsTypeToDistance2() 1570 1571 saf.SetSmoothingStrategy(strategy) 1572 if mask is not None: 1573 saf.SetSmoothingStrategyToSmoothingMask() 1574 if isinstance(mask, str): 1575 mask_ = self.dataset.GetPointData().GetArray(mask) 1576 if not mask_: 1577 vedo.logger.error(f"smooth_data(): mask array {mask} not found") 1578 return self 1579 mask_array = vtki.vtkUnsignedCharArray() 1580 mask_array.ShallowCopy(mask_) 1581 mask_array.SetName(mask_.GetName()) 1582 else: 1583 mask_array = utils.numpy2vtk(mask, dtype=np.uint8) 1584 saf.SetSmoothingMask(mask_array) 1585 1586 saf.Update() 1587 1588 self._update(saf.GetOutput()) 1589 self.pipeline = utils.OperationNode( 1590 "smooth_data", comment=f"strategy {strategy}", parents=[self], c="#9e2a2b" 1591 ) 1592 return self 1593 1594 def compute_streamlines( 1595 self, 1596 seeds: Any, 1597 integrator="rk4", 1598 direction="forward", 1599 initial_step_size=None, 1600 max_propagation=None, 1601 max_steps=10000, 1602 step_length=0, 1603 surface_constrained=False, 1604 compute_vorticity=False, 1605 ) -> Union["vedo.Lines", None]: 1606 """ 1607 Integrate a vector field to generate streamlines. 1608 1609 Arguments: 1610 seeds : (Mesh, Points, list) 1611 starting points of the streamlines 1612 integrator : (str) 1613 type of integration method to be used: 1614 - "rk2" (Runge-Kutta 2) 1615 - "rk4" (Runge-Kutta 4) 1616 - "rk45" (Runge-Kutta 45) 1617 direction : (str) 1618 direction of integration, either "forward", "backward" or "both" 1619 initial_step_size : (float) 1620 initial step size used for line integration 1621 max_propagation : (float) 1622 maximum length of a streamline expressed in absolute units 1623 max_steps : (int) 1624 maximum number of steps for a streamline 1625 step_length : (float) 1626 maximum length of a step expressed in absolute units 1627 surface_constrained : (bool) 1628 whether to stop integrating when the streamline leaves the surface 1629 compute_vorticity : (bool) 1630 whether to compute the vorticity at each streamline point 1631 """ 1632 b = self.dataset.GetBounds() 1633 size = (b[5]-b[4] + b[3]-b[2] + b[1]-b[0]) / 3 1634 if initial_step_size is None: 1635 initial_step_size = size / 1000.0 1636 1637 if max_propagation is None: 1638 max_propagation = size * 2 1639 1640 if utils.is_sequence(seeds): 1641 seeds = vedo.Points(seeds) 1642 1643 sti = vtki.new("StreamTracer") 1644 sti.SetSourceData(seeds.dataset) 1645 if isinstance(self, vedo.RectilinearGrid): 1646 sti.SetInputData(vedo.UnstructuredGrid(self.dataset).dataset) 1647 else: 1648 sti.SetInputDataObject(self.dataset) 1649 1650 sti.SetInitialIntegrationStep(initial_step_size) 1651 sti.SetComputeVorticity(compute_vorticity) 1652 sti.SetMaximumNumberOfSteps(max_steps) 1653 sti.SetMaximumPropagation(max_propagation) 1654 sti.SetSurfaceStreamlines(surface_constrained) 1655 if step_length: 1656 sti.SetMaximumIntegrationStep(step_length) 1657 1658 if "for" in direction: 1659 sti.SetIntegrationDirectionToForward() 1660 elif "back" in direction: 1661 sti.SetIntegrationDirectionToBackward() 1662 elif "both" in direction: 1663 sti.SetIntegrationDirectionToBoth() 1664 else: 1665 vedo.logger.error(f"in compute_streamlines(), unknown direction {direction}") 1666 return None 1667 1668 if integrator == "rk2": 1669 sti.SetIntegratorTypeToRungeKutta2() 1670 elif integrator == "rk4": 1671 sti.SetIntegratorTypeToRungeKutta4() 1672 elif integrator == "rk45": 1673 sti.SetIntegratorTypeToRungeKutta45() 1674 else: 1675 vedo.logger.error(f"in compute_streamlines(), unknown integrator {integrator}") 1676 return None 1677 1678 sti.Update() 1679 1680 stlines = vedo.shapes.Lines(sti.GetOutput(), lw=4) 1681 stlines.name = "StreamLines" 1682 self.pipeline = utils.OperationNode( 1683 "compute_streamlines", comment=f"{integrator}", parents=[self, seeds], c="#9e2a2b" 1684 ) 1685 return stlines
Common algorithms.
380 @property 381 def pointdata(self): 382 """ 383 Create and/or return a `numpy.array` associated to points (vertices). 384 A data array can be indexed either as a string or by an integer number. 385 E.g.: `myobj.pointdata["arrayname"]` 386 387 Usage: 388 389 `myobj.pointdata.keys()` to return the available data array names 390 391 `myobj.pointdata.select(name)` to make this array the active one 392 393 `myobj.pointdata.remove(name)` to remove this array 394 """ 395 return DataArrayHelper(self, 0)
Create and/or return a numpy.array
associated to points (vertices).
A data array can be indexed either as a string or by an integer number.
E.g.: myobj.pointdata["arrayname"]
Usage:
myobj.pointdata.keys()
to return the available data array names
myobj.pointdata.select(name)
to make this array the active one
myobj.pointdata.remove(name)
to remove this array
397 @property 398 def celldata(self): 399 """ 400 Create and/or return a `numpy.array` associated to cells (faces). 401 A data array can be indexed either as a string or by an integer number. 402 E.g.: `myobj.celldata["arrayname"]` 403 404 Usage: 405 406 `myobj.celldata.keys()` to return the available data array names 407 408 `myobj.celldata.select(name)` to make this array the active one 409 410 `myobj.celldata.remove(name)` to remove this array 411 """ 412 return DataArrayHelper(self, 1)
Create and/or return a numpy.array
associated to cells (faces).
A data array can be indexed either as a string or by an integer number.
E.g.: myobj.celldata["arrayname"]
Usage:
myobj.celldata.keys()
to return the available data array names
myobj.celldata.select(name)
to make this array the active one
myobj.celldata.remove(name)
to remove this array
414 @property 415 def metadata(self): 416 """ 417 Create and/or return a `numpy.array` associated to neither cells nor faces. 418 A data array can be indexed either as a string or by an integer number. 419 E.g.: `myobj.metadata["arrayname"]` 420 421 Usage: 422 423 `myobj.metadata.keys()` to return the available data array names 424 425 `myobj.metadata.select(name)` to make this array the active one 426 427 `myobj.metadata.remove(name)` to remove this array 428 """ 429 return DataArrayHelper(self, 2)
Create and/or return a numpy.array
associated to neither cells nor faces.
A data array can be indexed either as a string or by an integer number.
E.g.: myobj.metadata["arrayname"]
Usage:
myobj.metadata.keys()
to return the available data array names
myobj.metadata.select(name)
to make this array the active one
myobj.metadata.remove(name)
to remove this array
431 def memory_address(self) -> int: 432 """ 433 Return a unique memory address integer which may serve as the ID of the 434 object, or passed to c++ code. 435 """ 436 # https://www.linkedin.com/pulse/speedup-your-code-accessing-python-vtk-objects-from-c-pletzer/ 437 # https://github.com/tfmoraes/polydata_connectivity 438 return int(self.dataset.GetAddressAsString("")[5:], 16)
Return a unique memory address integer which may serve as the ID of the object, or passed to c++ code.
440 def memory_size(self) -> int: 441 """Return the size in bytes of the object in memory.""" 442 return self.dataset.GetActualMemorySize()
Return the size in bytes of the object in memory.
444 def modified(self) -> Self: 445 """Use in conjunction with `tonumpy()` to update any modifications to the image array.""" 446 self.dataset.GetPointData().Modified() 447 scals = self.dataset.GetPointData().GetScalars() 448 if scals: 449 scals.Modified() 450 return self
Use in conjunction with tonumpy()
to update any modifications to the image array.
452 def box(self, scale=1, padding=0) -> "vedo.Mesh": 453 """ 454 Return the bounding box as a new `Mesh` object. 455 456 Arguments: 457 scale : (float) 458 box size can be scaled by a factor 459 padding : (float, list) 460 a constant padding can be added (can be a list `[padx,pady,padz]`) 461 """ 462 b = self.bounds() 463 if not utils.is_sequence(padding): 464 padding = [padding, padding, padding] 465 length, width, height = b[1] - b[0], b[3] - b[2], b[5] - b[4] 466 tol = (length + width + height) / 30000 # useful for boxing text 467 pos = [(b[0] + b[1]) / 2, (b[3] + b[2]) / 2, (b[5] + b[4]) / 2 - tol] 468 bx = vedo.shapes.Box( 469 pos, 470 length * scale + padding[0], 471 width * scale + padding[1], 472 height * scale + padding[2], 473 c="gray", 474 ) 475 try: 476 pr = vtki.vtkProperty() 477 pr.DeepCopy(self.properties) 478 bx.actor.SetProperty(pr) 479 bx.properties = pr 480 except (AttributeError, TypeError): 481 pass 482 bx.flat().lighting("off").wireframe(True) 483 return bx
Return the bounding box as a new Mesh
object.
Arguments:
- scale : (float) box size can be scaled by a factor
- padding : (float, list)
a constant padding can be added (can be a list
[padx,pady,padz]
)
485 def update_dataset(self, dataset, **kwargs) -> Self: 486 """Update the dataset of the object with the provided VTK dataset.""" 487 self._update(dataset, **kwargs) 488 return self
Update the dataset of the object with the provided VTK dataset.
490 def bounds(self) -> np.ndarray: 491 """ 492 Get the object bounds. 493 Returns a list in format `[xmin,xmax, ymin,ymax, zmin,zmax]`. 494 """ 495 try: # this is very slow for large meshes 496 pts = self.vertices 497 xmin, ymin, zmin = np.min(pts, axis=0) 498 xmax, ymax, zmax = np.max(pts, axis=0) 499 return np.array([xmin, xmax, ymin, ymax, zmin, zmax]) 500 except (AttributeError, ValueError): 501 return np.array(self.dataset.GetBounds())
Get the object bounds.
Returns a list in format [xmin,xmax, ymin,ymax, zmin,zmax]
.
503 def xbounds(self, i=None) -> np.ndarray: 504 """Get the bounds `[xmin,xmax]`. Can specify upper or lower with i (0,1).""" 505 b = self.bounds() 506 if i is not None: 507 return b[i] 508 return np.array([b[0], b[1]])
Get the bounds [xmin,xmax]
. Can specify upper or lower with i (0,1).
510 def ybounds(self, i=None) -> np.ndarray: 511 """Get the bounds `[ymin,ymax]`. Can specify upper or lower with i (0,1).""" 512 b = self.bounds() 513 if i == 0: 514 return b[2] 515 if i == 1: 516 return b[3] 517 return np.array([b[2], b[3]])
Get the bounds [ymin,ymax]
. Can specify upper or lower with i (0,1).
519 def zbounds(self, i=None) -> np.ndarray: 520 """Get the bounds `[zmin,zmax]`. Can specify upper or lower with i (0,1).""" 521 b = self.bounds() 522 if i == 0: 523 return b[4] 524 if i == 1: 525 return b[5] 526 return np.array([b[4], b[5]])
Get the bounds [zmin,zmax]
. Can specify upper or lower with i (0,1).
528 def diagonal_size(self) -> float: 529 """Get the length of the diagonal of the bounding box.""" 530 b = self.bounds() 531 return np.sqrt((b[1] - b[0])**2 + (b[3] - b[2])**2 + (b[5] - b[4])**2)
Get the length of the diagonal of the bounding box.
533 def average_size(self) -> float: 534 """ 535 Calculate and return the average size of the object. 536 This is the mean of the vertex distances from the center of mass. 537 """ 538 coords = self.vertices 539 cm = np.mean(coords, axis=0) 540 if coords.shape[0] == 0: 541 return 0.0 542 cc = coords - cm 543 return np.mean(np.linalg.norm(cc, axis=1))
Calculate and return the average size of the object. This is the mean of the vertex distances from the center of mass.
545 def center_of_mass(self) -> np.ndarray: 546 """Get the center of mass of the object.""" 547 if isinstance(self, (vedo.RectilinearGrid, vedo.Volume)): 548 return np.array(self.dataset.GetCenter()) 549 cmf = vtki.new("CenterOfMass") 550 cmf.SetInputData(self.dataset) 551 cmf.Update() 552 c = cmf.GetCenter() 553 return np.array(c)
Get the center of mass of the object.
555 def copy_data_from(self, obj: Any) -> Self: 556 """Copy all data (point and cell data) from this input object""" 557 self.dataset.GetPointData().PassData(obj.dataset.GetPointData()) 558 self.dataset.GetCellData().PassData(obj.dataset.GetCellData()) 559 self.pipeline = utils.OperationNode( 560 "copy_data_from", 561 parents=[self, obj], 562 comment=f"{obj.__class__.__name__}", 563 shape="note", 564 c="#ccc5b9", 565 ) 566 return self
Copy all data (point and cell data) from this input object
568 def inputdata(self): 569 """Obsolete, use `.dataset` instead.""" 570 colors.printc("WARNING: 'inputdata()' is obsolete, use '.dataset' instead.", c="y") 571 return self.dataset
Obsolete, use .dataset
instead.
573 @property 574 def npoints(self): 575 """Retrieve the number of points (or vertices).""" 576 return self.dataset.GetNumberOfPoints()
Retrieve the number of points (or vertices).
578 @property 579 def nvertices(self): 580 """Retrieve the number of vertices (or points).""" 581 return self.dataset.GetNumberOfPoints()
Retrieve the number of vertices (or points).
583 @property 584 def ncells(self): 585 """Retrieve the number of cells.""" 586 return self.dataset.GetNumberOfCells()
Retrieve the number of cells.
588 def points(self, pts=None): 589 """Obsolete, use `self.vertices` or `self.coordinates` instead.""" 590 if pts is None: ### getter 591 592 if warnings["points_getter"]: 593 colors.printc(warnings["points_getter"], c="y") 594 warnings["points_getter"] = "" 595 return self.vertices 596 597 else: ### setter 598 599 if warnings["points_setter"]: 600 colors.printc(warnings["points_setter"], c="y") 601 warnings["points_setter"] = "" 602 603 pts = np.asarray(pts, dtype=np.float32) 604 605 if pts.ndim == 1: 606 ### getter by point index ################### 607 indices = pts.astype(int) 608 vpts = self.dataset.GetPoints() 609 arr = utils.vtk2numpy(vpts.GetData()) 610 return arr[indices] ########### 611 612 ### setter #################################### 613 if pts.shape[1] == 2: 614 pts = np.c_[pts, np.zeros(pts.shape[0], dtype=np.float32)] 615 arr = utils.numpy2vtk(pts, dtype=np.float32) 616 617 vpts = self.dataset.GetPoints() 618 vpts.SetData(arr) 619 vpts.Modified() 620 # reset mesh to identity matrix position/rotation: 621 self.point_locator = None 622 self.cell_locator = None 623 self.line_locator = None 624 self.transform = LinearTransform() 625 return self
Obsolete, use self.vertices
or self.coordinates
instead.
627 @property 628 def cell_centers(self): 629 """ 630 Get the coordinates of the cell centers. 631 632 Examples: 633 - [delaunay2d.py](https://github.com/marcomusy/vedo/tree/master/examples/basic/delaunay2d.py) 634 635 See also: `CellCenters()`. 636 """ 637 vcen = vtki.new("CellCenters") 638 vcen.CopyArraysOff() 639 vcen.SetInputData(self.dataset) 640 vcen.Update() 641 return utils.vtk2numpy(vcen.GetOutput().GetPoints().GetData())
643 @property 644 def lines(self): 645 """ 646 Get lines connectivity ids as a python array 647 formatted as `[[id0,id1], [id3,id4], ...]` 648 649 See also: `lines_as_flat_array()`. 650 """ 651 # Get cell connettivity ids as a 1D array. The vtk format is: 652 # [nids1, id0 ... idn, niids2, id0 ... idm, etc]. 653 try: 654 arr1d = utils.vtk2numpy(self.dataset.GetLines().GetData()) 655 except AttributeError: 656 return np.array([]) 657 i = 0 658 conn = [] 659 n = len(arr1d) 660 for _ in range(n): 661 cell = [arr1d[i + k + 1] for k in range(arr1d[i])] 662 conn.append(cell) 663 i += arr1d[i] + 1 664 if i >= n: 665 break 666 667 return conn # cannot always make a numpy array of it!
Get lines connectivity ids as a python array
formatted as [[id0,id1], [id3,id4], ...]
See also: lines_as_flat_array()
.
669 @property 670 def lines_as_flat_array(self): 671 """ 672 Get lines connectivity ids as a 1D numpy array. 673 Format is e.g. [2, 10,20, 3, 10,11,12, 2, 70,80, ...] 674 675 See also: `lines()`. 676 """ 677 try: 678 return utils.vtk2numpy(self.dataset.GetLines().GetData()) 679 except AttributeError: 680 return np.array([])
Get lines connectivity ids as a 1D numpy array. Format is e.g. [2, 10,20, 3, 10,11,12, 2, 70,80, ...]
See also: lines()
.
682 def mark_boundaries(self) -> Self: 683 """ 684 Mark cells and vertices if they lie on a boundary. 685 A new array called `BoundaryCells` is added to the object. 686 """ 687 mb = vtki.new("MarkBoundaryFilter") 688 mb.SetInputData(self.dataset) 689 mb.Update() 690 self.dataset.DeepCopy(mb.GetOutput()) 691 self.pipeline = utils.OperationNode("mark_boundaries", parents=[self]) 692 return self
Mark cells and vertices if they lie on a boundary.
A new array called BoundaryCells
is added to the object.
694 def find_cells_in_bounds(self, xbounds=(), ybounds=(), zbounds=()) -> np.ndarray: 695 """ 696 Find cells that are within the specified bounds. 697 """ 698 try: 699 xbounds = list(xbounds.bounds()) 700 except AttributeError: 701 pass 702 703 if len(xbounds) == 6: 704 bnds = xbounds 705 else: 706 bnds = list(self.bounds()) 707 if len(xbounds) == 2: 708 bnds[0] = xbounds[0] 709 bnds[1] = xbounds[1] 710 if len(ybounds) == 2: 711 bnds[2] = ybounds[0] 712 bnds[3] = ybounds[1] 713 if len(zbounds) == 2: 714 bnds[4] = zbounds[0] 715 bnds[5] = zbounds[1] 716 717 cell_ids = vtki.vtkIdList() 718 if not self.cell_locator: 719 self.cell_locator = vtki.new("CellTreeLocator") 720 self.cell_locator.SetDataSet(self.dataset) 721 self.cell_locator.BuildLocator() 722 self.cell_locator.FindCellsWithinBounds(bnds, cell_ids) 723 cids = [] 724 for i in range(cell_ids.GetNumberOfIds()): 725 cid = cell_ids.GetId(i) 726 cids.append(cid) 727 return np.array(cids)
Find cells that are within the specified bounds.
729 def find_cells_along_line(self, p0, p1, tol=0.001) -> np.ndarray: 730 """ 731 Find cells that are intersected by a line segment. 732 """ 733 cell_ids = vtki.vtkIdList() 734 if not self.cell_locator: 735 self.cell_locator = vtki.new("CellTreeLocator") 736 self.cell_locator.SetDataSet(self.dataset) 737 self.cell_locator.BuildLocator() 738 self.cell_locator.FindCellsAlongLine(p0, p1, tol, cell_ids) 739 cids = [] 740 for i in range(cell_ids.GetNumberOfIds()): 741 cid = cell_ids.GetId(i) 742 cids.append(cid) 743 return np.array(cids)
Find cells that are intersected by a line segment.
745 def find_cells_along_plane(self, origin, normal, tol=0.001) -> np.ndarray: 746 """ 747 Find cells that are intersected by a plane. 748 """ 749 cell_ids = vtki.vtkIdList() 750 if not self.cell_locator: 751 self.cell_locator = vtki.new("CellTreeLocator") 752 self.cell_locator.SetDataSet(self.dataset) 753 self.cell_locator.BuildLocator() 754 self.cell_locator.FindCellsAlongPlane(origin, normal, tol, cell_ids) 755 cids = [] 756 for i in range(cell_ids.GetNumberOfIds()): 757 cid = cell_ids.GetId(i) 758 cids.append(cid) 759 return np.array(cids)
Find cells that are intersected by a plane.
761 def keep_cell_types(self, types=()): 762 """ 763 Extract cells of a specific type. 764 765 Check the VTK cell types here: 766 https://vtk.org/doc/nightly/html/vtkCellType_8h.html 767 """ 768 fe = vtki.new("ExtractCellsByType") 769 fe.SetInputData(self.dataset) 770 for t in types: 771 try: 772 if utils.is_integer(t): 773 it = t 774 else: 775 it = vtki.cell_types[t.upper()] 776 except KeyError: 777 vedo.logger.error(f"Cell type '{t}' not recognized") 778 continue 779 fe.AddCellType(it) 780 fe.Update() 781 self._update(fe.GetOutput()) 782 return self
Extract cells of a specific type.
Check the VTK cell types here: https://vtk.org/doc/nightly/html/vtkCellType_8h.html
784 def map_cells_to_points(self, arrays=(), move=False) -> Self: 785 """ 786 Interpolate cell data (i.e., data specified per cell or face) 787 into point data (i.e., data specified at each vertex). 788 The method of transformation is based on averaging the data values 789 of all cells using a particular point. 790 791 A custom list of arrays to be mapped can be passed in input. 792 793 Set `move=True` to delete the original `celldata` array. 794 """ 795 c2p = vtki.new("CellDataToPointData") 796 c2p.SetInputData(self.dataset) 797 if not move: 798 c2p.PassCellDataOn() 799 if arrays: 800 c2p.ClearCellDataArrays() 801 c2p.ProcessAllArraysOff() 802 for arr in arrays: 803 c2p.AddCellDataArray(arr) 804 else: 805 c2p.ProcessAllArraysOn() 806 c2p.Update() 807 self._update(c2p.GetOutput(), reset_locators=False) 808 self.mapper.SetScalarModeToUsePointData() 809 self.pipeline = utils.OperationNode("map_cells_to_points", parents=[self]) 810 return self
Interpolate cell data (i.e., data specified per cell or face) into point data (i.e., data specified at each vertex). The method of transformation is based on averaging the data values of all cells using a particular point.
A custom list of arrays to be mapped can be passed in input.
Set move=True
to delete the original celldata
array.
812 @property 813 def vertices(self): 814 """Return the vertices (points) coordinates.""" 815 try: 816 # for polydata and unstructured grid 817 varr = self.dataset.GetPoints().GetData() 818 except (AttributeError, TypeError): 819 try: 820 # for RectilinearGrid, StructuredGrid 821 vpts = vtki.vtkPoints() 822 self.dataset.GetPoints(vpts) 823 varr = vpts.GetData() 824 except (AttributeError, TypeError): 825 try: 826 # for ImageData 827 v2p = vtki.new("ImageToPoints") 828 v2p.SetInputData(self.dataset) 829 v2p.Update() 830 varr = v2p.GetOutput().GetPoints().GetData() 831 except AttributeError: 832 return np.array([]) 833 834 return utils.vtk2numpy(varr)
Return the vertices (points) coordinates.
855 @property 856 def coordinates(self): 857 """Return the vertices (points) coordinates. Same as `vertices`.""" 858 return self.vertices
Return the vertices (points) coordinates. Same as vertices
.
865 @property 866 def cells_as_flat_array(self): 867 """ 868 Get cell connectivity ids as a 1D numpy array. 869 Format is e.g. [3, 10,20,30 4, 10,11,12,13 ...] 870 """ 871 try: 872 # valid for unstructured grid 873 arr1d = utils.vtk2numpy(self.dataset.GetCells().GetData()) 874 except AttributeError: 875 # valid for polydata 876 arr1d = utils.vtk2numpy(self.dataset.GetPolys().GetData()) 877 return arr1d
Get cell connectivity ids as a 1D numpy array. Format is e.g. [3, 10,20,30 4, 10,11,12,13 ...]
879 @property 880 def cells(self): 881 """ 882 Get the cells connectivity ids as a numpy array. 883 884 The output format is: `[[id0 ... idn], [id0 ... idm], etc]`. 885 """ 886 try: 887 # valid for unstructured grid 888 arr1d = utils.vtk2numpy(self.dataset.GetCells().GetData()) 889 except AttributeError: 890 try: 891 # valid for polydata 892 arr1d = utils.vtk2numpy(self.dataset.GetPolys().GetData()) 893 except AttributeError: 894 vedo.logger.warning(f"Cannot get cells for {type(self)}") 895 return np.array([]) 896 897 # Get cell connettivity ids as a 1D array. vtk format is: 898 # [nids1, id0 ... idn, niids2, id0 ... idm, etc]. 899 i = 0 900 conn = [] 901 n = len(arr1d) 902 if n: 903 while True: 904 cell = [arr1d[i + k] for k in range(1, arr1d[i] + 1)] 905 conn.append(cell) 906 i += arr1d[i] + 1 907 if i >= n: 908 break 909 return conn
Get the cells connectivity ids as a numpy array.
The output format is: [[id0 ... idn], [id0 ... idm], etc]
.
911 def map_points_to_cells(self, arrays=(), move=False) -> Self: 912 """ 913 Interpolate point data (i.e., data specified per point or vertex) 914 into cell data (i.e., data specified per cell). 915 The method of transformation is based on averaging the data values 916 of all points defining a particular cell. 917 918 A custom list of arrays to be mapped can be passed in input. 919 920 Set `move=True` to delete the original `pointdata` array. 921 922 Examples: 923 - [mesh_map2cell.py](https://github.com/marcomusy/vedo/tree/master/examples/basic/mesh_map2cell.py) 924 """ 925 p2c = vtki.new("PointDataToCellData") 926 p2c.SetInputData(self.dataset) 927 if not move: 928 p2c.PassPointDataOn() 929 if arrays: 930 p2c.ClearPointDataArrays() 931 p2c.ProcessAllArraysOff() 932 for arr in arrays: 933 p2c.AddPointDataArray(arr) 934 else: 935 p2c.ProcessAllArraysOn() 936 p2c.Update() 937 self._update(p2c.GetOutput(), reset_locators=False) 938 self.mapper.SetScalarModeToUseCellData() 939 self.pipeline = utils.OperationNode("map_points_to_cells", parents=[self]) 940 return self
Interpolate point data (i.e., data specified per point or vertex) into cell data (i.e., data specified per cell). The method of transformation is based on averaging the data values of all points defining a particular cell.
A custom list of arrays to be mapped can be passed in input.
Set move=True
to delete the original pointdata
array.
Examples:
942 def resample_data_from(self, source, tol=None, categorical=False) -> Self: 943 """ 944 Resample point and cell data from another dataset. 945 The output has the same structure but its point data have 946 the resampled values from target. 947 948 Use `tol` to set the tolerance used to compute whether 949 a point in the source is in a cell of the current object. 950 Points without resampled values, and their cells, are marked as blank. 951 If the data is categorical, then the resulting data will be determined 952 by a nearest neighbor interpolation scheme. 953 954 Example: 955 ```python 956 from vedo import * 957 m1 = Mesh(dataurl+'bunny.obj')#.add_gaussian_noise(0.1) 958 pts = m1.vertices 959 ces = m1.cell_centers 960 m1.pointdata["xvalues"] = np.power(pts[:,0], 3) 961 m1.celldata["yvalues"] = np.power(ces[:,1], 3) 962 m2 = Mesh(dataurl+'bunny.obj') 963 m2.resample_data_from(m1) 964 # print(m2.pointdata["xvalues"]) 965 show(m1, m2 , N=2, axes=1) 966 ``` 967 """ 968 rs = vtki.new("ResampleWithDataSet") 969 rs.SetInputData(self.dataset) 970 rs.SetSourceData(source.dataset) 971 972 rs.SetPassPointArrays(True) 973 rs.SetPassCellArrays(True) 974 rs.SetPassFieldArrays(True) 975 rs.SetCategoricalData(categorical) 976 977 rs.SetComputeTolerance(True) 978 if tol: 979 rs.SetComputeTolerance(False) 980 rs.SetTolerance(tol) 981 rs.Update() 982 self._update(rs.GetOutput(), reset_locators=False) 983 self.pipeline = utils.OperationNode( 984 "resample_data_from", 985 comment=f"{source.__class__.__name__}", 986 parents=[self, source], 987 ) 988 return self
Resample point and cell data from another dataset. The output has the same structure but its point data have the resampled values from target.
Use tol
to set the tolerance used to compute whether
a point in the source is in a cell of the current object.
Points without resampled values, and their cells, are marked as blank.
If the data is categorical, then the resulting data will be determined
by a nearest neighbor interpolation scheme.
Example:
from vedo import *
m1 = Mesh(dataurl+'bunny.obj')#.add_gaussian_noise(0.1)
pts = m1.vertices
ces = m1.cell_centers
m1.pointdata["xvalues"] = np.power(pts[:,0], 3)
m1.celldata["yvalues"] = np.power(ces[:,1], 3)
m2 = Mesh(dataurl+'bunny.obj')
m2.resample_data_from(m1)
# print(m2.pointdata["xvalues"])
show(m1, m2 , N=2, axes=1)
990 def interpolate_data_from( 991 self, 992 source, 993 radius=None, 994 n=None, 995 kernel="shepard", 996 exclude=("Normals",), 997 on="points", 998 null_strategy=1, 999 null_value=0, 1000 ) -> Self: 1001 """ 1002 Interpolate over source to port its data onto the current object using various kernels. 1003 1004 If n (number of closest points to use) is set then radius value is ignored. 1005 1006 Check out also: 1007 `probe()` which in many cases can be faster. 1008 1009 Arguments: 1010 kernel : (str) 1011 available kernels are [shepard, gaussian, linear] 1012 null_strategy : (int) 1013 specify a strategy to use when encountering a "null" point 1014 during the interpolation process. Null points occur when the local neighborhood 1015 (of nearby points to interpolate from) is empty. 1016 1017 - Case 0: an output array is created that marks points 1018 as being valid (=1) or null (invalid =0), and the null_value is set as well 1019 - Case 1: the output data value(s) are set to the provided null_value 1020 - Case 2: simply use the closest point to perform the interpolation. 1021 null_value : (float) 1022 see above. 1023 1024 Examples: 1025 - [interpolate_scalar1.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/interpolate_scalar1.py) 1026 - [interpolate_scalar3.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/interpolate_scalar3.py) 1027 - [interpolate_scalar4.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/interpolate_scalar4.py) 1028 - [image_probe.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/image_probe.py) 1029 1030 ![](https://vedo.embl.es/images/advanced/interpolateMeshArray.png) 1031 """ 1032 if radius is None and not n: 1033 vedo.logger.error("in interpolate_data_from(): please set either radius or n") 1034 raise RuntimeError 1035 1036 if on == "points": 1037 points = source.dataset 1038 elif on == "cells": 1039 c2p = vtki.new("CellDataToPointData") 1040 c2p.SetInputData(source.dataset) 1041 c2p.Update() 1042 points = c2p.GetOutput() 1043 else: 1044 vedo.logger.error("in interpolate_data_from(), on must be on points or cells") 1045 raise RuntimeError() 1046 1047 locator = vtki.new("PointLocator") 1048 locator.SetDataSet(points) 1049 locator.BuildLocator() 1050 1051 if kernel.lower() == "shepard": 1052 kern = vtki.new("ShepardKernel") 1053 kern.SetPowerParameter(2) 1054 elif kernel.lower() == "gaussian": 1055 kern = vtki.new("GaussianKernel") 1056 kern.SetSharpness(2) 1057 elif kernel.lower() == "linear": 1058 kern = vtki.new("LinearKernel") 1059 else: 1060 vedo.logger.error("available kernels are: [shepard, gaussian, linear]") 1061 raise RuntimeError() 1062 1063 if n: 1064 kern.SetNumberOfPoints(n) 1065 kern.SetKernelFootprintToNClosest() 1066 else: 1067 kern.SetRadius(radius) 1068 kern.SetKernelFootprintToRadius() 1069 1070 interpolator = vtki.new("PointInterpolator") 1071 interpolator.SetInputData(self.dataset) 1072 interpolator.SetSourceData(points) 1073 interpolator.SetKernel(kern) 1074 interpolator.SetLocator(locator) 1075 interpolator.PassFieldArraysOn() 1076 interpolator.SetNullPointsStrategy(null_strategy) 1077 interpolator.SetNullValue(null_value) 1078 interpolator.SetValidPointsMaskArrayName("ValidPointMask") 1079 for ex in exclude: 1080 interpolator.AddExcludedArray(ex) 1081 1082 # remove arrays that are already present in the source 1083 # this is because the interpolator will ignore them otherwise 1084 for i in range(self.dataset.GetPointData().GetNumberOfArrays()): 1085 name = self.dataset.GetPointData().GetArrayName(i) 1086 if name not in exclude: 1087 self.dataset.GetPointData().RemoveArray(name) 1088 1089 interpolator.Update() 1090 1091 if on == "cells": 1092 p2c = vtki.new("PointDataToCellData") 1093 p2c.SetInputData(interpolator.GetOutput()) 1094 p2c.Update() 1095 cpoly = p2c.GetOutput() 1096 else: 1097 cpoly = interpolator.GetOutput() 1098 1099 self._update(cpoly, reset_locators=False) 1100 1101 self.pipeline = utils.OperationNode("interpolate_data_from", parents=[self, source]) 1102 return self
Interpolate over source to port its data onto the current object using various kernels.
If n (number of closest points to use) is set then radius value is ignored.
Check out also:
probe()
which in many cases can be faster.
Arguments:
- kernel : (str) available kernels are [shepard, gaussian, linear]
null_strategy : (int) specify a strategy to use when encountering a "null" point during the interpolation process. Null points occur when the local neighborhood (of nearby points to interpolate from) is empty.
- Case 0: an output array is created that marks points as being valid (=1) or null (invalid =0), and the null_value is set as well
- Case 1: the output data value(s) are set to the provided null_value
- Case 2: simply use the closest point to perform the interpolation.
- null_value : (float) see above.
Examples:
1104 def add_ids(self) -> Self: 1105 """ 1106 Generate point and cell ids arrays. 1107 1108 Two new arrays are added to the mesh: `PointID` and `CellID`. 1109 """ 1110 ids = vtki.new("IdFilter") 1111 ids.SetInputData(self.dataset) 1112 ids.PointIdsOn() 1113 ids.CellIdsOn() 1114 ids.FieldDataOff() 1115 ids.SetPointIdsArrayName("PointID") 1116 ids.SetCellIdsArrayName("CellID") 1117 ids.Update() 1118 self._update(ids.GetOutput(), reset_locators=False) 1119 self.pipeline = utils.OperationNode("add_ids", parents=[self]) 1120 return self
Generate point and cell ids arrays.
Two new arrays are added to the mesh: PointID
and CellID
.
1122 def gradient(self, input_array=None, on="points", fast=False) -> np.ndarray: 1123 """ 1124 Compute and return the gradiend of the active scalar field as a numpy array. 1125 1126 Arguments: 1127 input_array : (str) 1128 array of the scalars to compute the gradient, 1129 if None the current active array is selected 1130 on : (str) 1131 compute either on 'points' or 'cells' data 1132 fast : (bool) 1133 if True, will use a less accurate algorithm 1134 that performs fewer derivative calculations (and is therefore faster). 1135 1136 Examples: 1137 - [isolines.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/isolines.py) 1138 1139 ![](https://user-images.githubusercontent.com/32848391/72433087-f00a8780-3798-11ea-9778-991f0abeca70.png) 1140 """ 1141 gra = vtki.new("GradientFilter") 1142 if on.startswith("p"): 1143 varr = self.dataset.GetPointData() 1144 tp = vtki.vtkDataObject.FIELD_ASSOCIATION_POINTS 1145 elif on.startswith("c"): 1146 varr = self.dataset.GetCellData() 1147 tp = vtki.vtkDataObject.FIELD_ASSOCIATION_CELLS 1148 else: 1149 vedo.logger.error(f"in gradient: unknown option {on}") 1150 raise RuntimeError 1151 1152 if input_array is None: 1153 if varr.GetScalars(): 1154 input_array = varr.GetScalars().GetName() 1155 else: 1156 vedo.logger.error(f"in gradient: no scalars found for {on}") 1157 raise RuntimeError 1158 1159 gra.SetInputData(self.dataset) 1160 gra.SetInputScalars(tp, input_array) 1161 gra.SetResultArrayName("Gradient") 1162 gra.SetFasterApproximation(fast) 1163 gra.ComputeDivergenceOff() 1164 gra.ComputeVorticityOff() 1165 gra.ComputeGradientOn() 1166 gra.Update() 1167 if on.startswith("p"): 1168 gvecs = utils.vtk2numpy(gra.GetOutput().GetPointData().GetArray("Gradient")) 1169 else: 1170 gvecs = utils.vtk2numpy(gra.GetOutput().GetCellData().GetArray("Gradient")) 1171 return gvecs
Compute and return the gradiend of the active scalar field as a numpy array.
Arguments:
- input_array : (str) array of the scalars to compute the gradient, if None the current active array is selected
- on : (str) compute either on 'points' or 'cells' data
- fast : (bool) if True, will use a less accurate algorithm that performs fewer derivative calculations (and is therefore faster).
Examples:
1173 def divergence(self, array_name=None, on="points", fast=False) -> np.ndarray: 1174 """ 1175 Compute and return the divergence of a vector field as a numpy array. 1176 1177 Arguments: 1178 array_name : (str) 1179 name of the array of vectors to compute the divergence, 1180 if None the current active array is selected 1181 on : (str) 1182 compute either on 'points' or 'cells' data 1183 fast : (bool) 1184 if True, will use a less accurate algorithm 1185 that performs fewer derivative calculations (and is therefore faster). 1186 """ 1187 div = vtki.new("GradientFilter") 1188 if on.startswith("p"): 1189 varr = self.dataset.GetPointData() 1190 tp = vtki.vtkDataObject.FIELD_ASSOCIATION_POINTS 1191 elif on.startswith("c"): 1192 varr = self.dataset.GetCellData() 1193 tp = vtki.vtkDataObject.FIELD_ASSOCIATION_CELLS 1194 else: 1195 vedo.logger.error(f"in divergence(): unknown option {on}") 1196 raise RuntimeError 1197 1198 if array_name is None: 1199 if varr.GetVectors(): 1200 array_name = varr.GetVectors().GetName() 1201 else: 1202 vedo.logger.error(f"in divergence(): no vectors found for {on}") 1203 raise RuntimeError 1204 1205 div.SetInputData(self.dataset) 1206 div.SetInputScalars(tp, array_name) 1207 div.ComputeDivergenceOn() 1208 div.ComputeGradientOff() 1209 div.ComputeVorticityOff() 1210 div.SetDivergenceArrayName("Divergence") 1211 div.SetFasterApproximation(fast) 1212 div.Update() 1213 if on.startswith("p"): 1214 dvecs = utils.vtk2numpy(div.GetOutput().GetPointData().GetArray("Divergence")) 1215 else: 1216 dvecs = utils.vtk2numpy(div.GetOutput().GetCellData().GetArray("Divergence")) 1217 return dvecs
Compute and return the divergence of a vector field as a numpy array.
Arguments:
- array_name : (str) name of the array of vectors to compute the divergence, if None the current active array is selected
- on : (str) compute either on 'points' or 'cells' data
- fast : (bool) if True, will use a less accurate algorithm that performs fewer derivative calculations (and is therefore faster).
1219 def vorticity(self, array_name=None, on="points", fast=False) -> np.ndarray: 1220 """ 1221 Compute and return the vorticity of a vector field as a numpy array. 1222 1223 Arguments: 1224 array_name : (str) 1225 name of the array to compute the vorticity, 1226 if None the current active array is selected 1227 on : (str) 1228 compute either on 'points' or 'cells' data 1229 fast : (bool) 1230 if True, will use a less accurate algorithm 1231 that performs fewer derivative calculations (and is therefore faster). 1232 """ 1233 vort = vtki.new("GradientFilter") 1234 if on.startswith("p"): 1235 varr = self.dataset.GetPointData() 1236 tp = vtki.vtkDataObject.FIELD_ASSOCIATION_POINTS 1237 elif on.startswith("c"): 1238 varr = self.dataset.GetCellData() 1239 tp = vtki.vtkDataObject.FIELD_ASSOCIATION_CELLS 1240 else: 1241 vedo.logger.error(f"in vorticity(): unknown option {on}") 1242 raise RuntimeError 1243 1244 if array_name is None: 1245 if varr.GetVectors(): 1246 array_name = varr.GetVectors().GetName() 1247 else: 1248 vedo.logger.error(f"in vorticity(): no vectors found for {on}") 1249 raise RuntimeError 1250 1251 vort.SetInputData(self.dataset) 1252 vort.SetInputScalars(tp, array_name) 1253 vort.ComputeDivergenceOff() 1254 vort.ComputeGradientOff() 1255 vort.ComputeVorticityOn() 1256 vort.SetVorticityArrayName("Vorticity") 1257 vort.SetFasterApproximation(fast) 1258 vort.Update() 1259 if on.startswith("p"): 1260 vvecs = utils.vtk2numpy(vort.GetOutput().GetPointData().GetArray("Vorticity")) 1261 else: 1262 vvecs = utils.vtk2numpy(vort.GetOutput().GetCellData().GetArray("Vorticity")) 1263 return vvecs
Compute and return the vorticity of a vector field as a numpy array.
Arguments:
- array_name : (str) name of the array to compute the vorticity, if None the current active array is selected
- on : (str) compute either on 'points' or 'cells' data
- fast : (bool) if True, will use a less accurate algorithm that performs fewer derivative calculations (and is therefore faster).
1265 def probe( 1266 self, 1267 source, 1268 categorical=False, 1269 snap=False, 1270 tol=0, 1271 ) -> Self: 1272 """ 1273 Takes a data set and probes its scalars at the specified points in space. 1274 1275 Note that a mask is also output with valid/invalid points which can be accessed 1276 with `mesh.pointdata['ValidPointMask']`. 1277 1278 Arguments: 1279 source : any dataset 1280 the data set to probe. 1281 categorical : bool 1282 control whether the source pointdata is to be treated as categorical. 1283 snap : bool 1284 snap to the cell with the closest point if no cell was found 1285 tol : float 1286 the tolerance to use when performing the probe. 1287 1288 Check out also: 1289 `interpolate_data_from()` and `tovolume()` 1290 1291 Examples: 1292 - [probe_points.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/probe_points.py) 1293 1294 ![](https://vedo.embl.es/images/volumetric/probePoints.png) 1295 """ 1296 probe_filter = vtki.new("ProbeFilter") 1297 probe_filter.SetSourceData(source.dataset) 1298 probe_filter.SetInputData(self.dataset) 1299 probe_filter.PassCellArraysOn() 1300 probe_filter.PassFieldArraysOn() 1301 probe_filter.PassPointArraysOn() 1302 probe_filter.SetCategoricalData(categorical) 1303 probe_filter.ComputeToleranceOff() 1304 if tol: 1305 probe_filter.ComputeToleranceOn() 1306 probe_filter.SetTolerance(tol) 1307 probe_filter.SetSnapToCellWithClosestPoint(snap) 1308 probe_filter.Update() 1309 self._update(probe_filter.GetOutput(), reset_locators=False) 1310 self.pipeline = utils.OperationNode("probe", parents=[self, source]) 1311 self.pointdata.rename("vtkValidPointMask", "ValidPointMask") 1312 return self
Takes a data set and probes its scalars at the specified points in space.
Note that a mask is also output with valid/invalid points which can be accessed
with mesh.pointdata['ValidPointMask']
.
Arguments:
- source : any dataset the data set to probe.
- categorical : bool control whether the source pointdata is to be treated as categorical.
- snap : bool snap to the cell with the closest point if no cell was found
- tol : float the tolerance to use when performing the probe.
Check out also:
interpolate_data_from()
andtovolume()
Examples:
1314 def compute_cell_size(self) -> Self: 1315 """ 1316 Add to this object a cell data array 1317 containing the area, volume and edge length 1318 of the cells (when applicable to the object type). 1319 1320 Array names are: `Area`, `Volume`, `Length`. 1321 """ 1322 csf = vtki.new("CellSizeFilter") 1323 csf.SetInputData(self.dataset) 1324 csf.SetComputeArea(1) 1325 csf.SetComputeVolume(1) 1326 csf.SetComputeLength(1) 1327 csf.SetComputeVertexCount(0) 1328 csf.SetAreaArrayName("Area") 1329 csf.SetVolumeArrayName("Volume") 1330 csf.SetLengthArrayName("Length") 1331 csf.Update() 1332 self._update(csf.GetOutput(), reset_locators=False) 1333 return self
Add to this object a cell data array containing the area, volume and edge length of the cells (when applicable to the object type).
Array names are: Area
, Volume
, Length
.
1335 def generate_random_data(self) -> Self: 1336 """Fill a dataset with random attributes""" 1337 gen = vtki.new("RandomAttributeGenerator") 1338 gen.SetInputData(self.dataset) 1339 gen.GenerateAllDataOn() 1340 gen.SetDataTypeToFloat() 1341 gen.GeneratePointNormalsOff() 1342 gen.GeneratePointTensorsOn() 1343 gen.GenerateCellScalarsOn() 1344 gen.Update() 1345 self._update(gen.GetOutput(), reset_locators=False) 1346 self.pipeline = utils.OperationNode("generate_random_data", parents=[self]) 1347 return self
Fill a dataset with random attributes
1349 def integrate_data(self) -> dict: 1350 """ 1351 Integrate point and cell data arrays while computing length, 1352 area or volume of the domain. It works for 1D, 2D or 3D cells. 1353 1354 For volumetric datasets, this filter ignores all but 3D cells. 1355 It will not compute the volume contained in a closed surface. 1356 1357 Returns a dictionary with keys: `pointdata`, `celldata`, `metadata`, 1358 which contain the integration result for the corresponding attributes. 1359 1360 Examples: 1361 ```python 1362 from vedo import * 1363 surf = Sphere(res=100) 1364 surf.pointdata['scalars'] = np.ones(surf.npoints) 1365 data = surf.integrate_data() 1366 print(data['pointdata']['scalars'], "is equal to 4pi", 4*np.pi) 1367 ``` 1368 1369 ```python 1370 from vedo import * 1371 1372 xcoords1 = np.arange(0, 2.2, 0.2) 1373 xcoords2 = sqrt(np.arange(0, 4.2, 0.2)) 1374 1375 ycoords = np.arange(0, 1.2, 0.2) 1376 1377 surf1 = Grid(s=(xcoords1, ycoords)).rotate_y(-45).lw(2) 1378 surf2 = Grid(s=(xcoords2, ycoords)).rotate_y(-45).lw(2) 1379 1380 surf1.pointdata['scalars'] = surf1.vertices[:,2] 1381 surf2.pointdata['scalars'] = surf2.vertices[:,2] 1382 1383 data1 = surf1.integrate_data() 1384 data2 = surf2.integrate_data() 1385 1386 print(data1['pointdata']['scalars'], 1387 "is equal to", 1388 data2['pointdata']['scalars'], 1389 "even if the grids are different!", 1390 "(= the volume under the surface)" 1391 ) 1392 show(surf1, surf2, N=2, axes=1).close() 1393 ``` 1394 """ 1395 vinteg = vtki.new("IntegrateAttributes") 1396 vinteg.SetInputData(self.dataset) 1397 vinteg.Update() 1398 ugrid = vedo.UnstructuredGrid(vinteg.GetOutput()) 1399 data = dict( 1400 pointdata=ugrid.pointdata.todict(), 1401 celldata=ugrid.celldata.todict(), 1402 metadata=ugrid.metadata.todict(), 1403 ) 1404 return data
Integrate point and cell data arrays while computing length, area or volume of the domain. It works for 1D, 2D or 3D cells.
For volumetric datasets, this filter ignores all but 3D cells. It will not compute the volume contained in a closed surface.
Returns a dictionary with keys: pointdata
, celldata
, metadata
,
which contain the integration result for the corresponding attributes.
Examples:
from vedo import * surf = Sphere(res=100) surf.pointdata['scalars'] = np.ones(surf.npoints) data = surf.integrate_data() print(data['pointdata']['scalars'], "is equal to 4pi", 4*np.pi)
from vedo import * xcoords1 = np.arange(0, 2.2, 0.2) xcoords2 = sqrt(np.arange(0, 4.2, 0.2)) ycoords = np.arange(0, 1.2, 0.2) surf1 = Grid(s=(xcoords1, ycoords)).rotate_y(-45).lw(2) surf2 = Grid(s=(xcoords2, ycoords)).rotate_y(-45).lw(2) surf1.pointdata['scalars'] = surf1.vertices[:,2] surf2.pointdata['scalars'] = surf2.vertices[:,2] data1 = surf1.integrate_data() data2 = surf2.integrate_data() print(data1['pointdata']['scalars'], "is equal to", data2['pointdata']['scalars'], "even if the grids are different!", "(= the volume under the surface)" ) show(surf1, surf2, N=2, axes=1).close()
1406 def write(self, filename, binary=True) -> None: 1407 """Write object to file.""" 1408 out = vedo.file_io.write(self, filename, binary) 1409 out.pipeline = utils.OperationNode( 1410 "write", parents=[self], comment=filename[:15], shape="folder", c="#8a817c" 1411 )
Write object to file.
1413 def tomesh(self, bounds=(), shrink=0) -> "vedo.Mesh": 1414 """ 1415 Extract boundary geometry from dataset (or convert data to polygonal type). 1416 1417 Two new arrays are added to the mesh: `OriginalCellIds` and `OriginalPointIds` 1418 to keep track of the original mesh elements. 1419 1420 Arguments: 1421 bounds : (list) 1422 specify a sub-region to extract 1423 shrink : (float) 1424 shrink the cells to a fraction of their original size 1425 """ 1426 geo = vtki.new("GeometryFilter") 1427 1428 if shrink: 1429 sf = vtki.new("ShrinkFilter") 1430 sf.SetInputData(self.dataset) 1431 sf.SetShrinkFactor(shrink) 1432 sf.Update() 1433 geo.SetInputData(sf.GetOutput()) 1434 else: 1435 geo.SetInputData(self.dataset) 1436 1437 geo.SetPassThroughCellIds(1) 1438 geo.SetPassThroughPointIds(1) 1439 geo.SetOriginalCellIdsName("OriginalCellIds") 1440 geo.SetOriginalPointIdsName("OriginalPointIds") 1441 geo.SetNonlinearSubdivisionLevel(1) 1442 # geo.MergingOff() # crashes on StructuredGrids 1443 if bounds: 1444 geo.SetExtent(bounds) 1445 geo.ExtentClippingOn() 1446 geo.Update() 1447 msh = vedo.mesh.Mesh(geo.GetOutput()) 1448 msh.pipeline = utils.OperationNode("tomesh", parents=[self], c="#9e2a2b") 1449 return msh
Extract boundary geometry from dataset (or convert data to polygonal type).
Two new arrays are added to the mesh: OriginalCellIds
and OriginalPointIds
to keep track of the original mesh elements.
Arguments:
- bounds : (list) specify a sub-region to extract
- shrink : (float) shrink the cells to a fraction of their original size
1451 def signed_distance(self, dims=(20, 20, 20), bounds=None, invert=False, max_radius=None) -> "vedo.Volume": 1452 """ 1453 Compute the `Volume` object whose voxels contains the signed distance from 1454 the object. The calling object must have "Normals" defined. 1455 1456 Arguments: 1457 bounds : (list, actor) 1458 bounding box sizes 1459 dims : (list) 1460 dimensions (nr. of voxels) of the output volume. 1461 invert : (bool) 1462 flip the sign 1463 max_radius : (float) 1464 specify how far out to propagate distance calculation 1465 1466 Examples: 1467 - [distance2mesh.py](https://github.com/marcomusy/vedo/blob/master/examples/basic/distance2mesh.py) 1468 1469 ![](https://vedo.embl.es/images/basic/distance2mesh.png) 1470 """ 1471 if bounds is None: 1472 bounds = self.bounds() 1473 if max_radius is None: 1474 max_radius = self.diagonal_size() / 2 1475 dist = vtki.new("SignedDistance") 1476 dist.SetInputData(self.dataset) 1477 dist.SetRadius(max_radius) 1478 dist.SetBounds(bounds) 1479 dist.SetDimensions(dims) 1480 dist.Update() 1481 img = dist.GetOutput() 1482 if invert: 1483 mat = vtki.new("ImageMathematics") 1484 mat.SetInput1Data(img) 1485 mat.SetOperationToMultiplyByK() 1486 mat.SetConstantK(-1) 1487 mat.Update() 1488 img = mat.GetOutput() 1489 1490 vol = vedo.Volume(img) 1491 vol.name = "SignedDistanceVolume" 1492 vol.pipeline = utils.OperationNode( 1493 "signed_distance", 1494 parents=[self], 1495 comment=f"dims={tuple(vol.dimensions())}", 1496 c="#e9c46a:#0096c7", 1497 ) 1498 return vol
Compute the Volume
object whose voxels contains the signed distance from
the object. The calling object must have "Normals" defined.
Arguments:
- bounds : (list, actor) bounding box sizes
- dims : (list) dimensions (nr. of voxels) of the output volume.
- invert : (bool) flip the sign
- max_radius : (float) specify how far out to propagate distance calculation
Examples:
1500 def unsigned_distance( 1501 self, dims=(25,25,25), bounds=(), max_radius=0, cap_value=0) -> "vedo.Volume": 1502 """ 1503 Compute the `Volume` object whose voxels contains the unsigned distance. 1504 """ 1505 dist = vtki.new("UnsignedDistance") 1506 dist.SetInputData(self.dataset) 1507 dist.SetDimensions(dims) 1508 1509 if len(bounds) == 6: 1510 dist.SetBounds(bounds) 1511 # elif bounds == "auto": 1512 # dist.AdjustBoundsOn() 1513 else: 1514 dist.SetBounds(self.bounds()) 1515 if not max_radius: 1516 max_radius = self.diagonal_size() / 10 1517 dist.SetRadius(max_radius) 1518 1519 if self.point_locator: 1520 dist.SetLocator(self.point_locator) 1521 1522 if cap_value is not None: 1523 dist.CappingOn() 1524 dist.SetCapValue(cap_value) 1525 dist.SetOutputScalarTypeToFloat() 1526 dist.Update() 1527 vol = vedo.Volume(dist.GetOutput()) 1528 vol.name = "UnsignedDistanceVolume" 1529 vol.pipeline = utils.OperationNode( 1530 "unsigned_distance", parents=[self], c="#e9c46a:#0096c7") 1531 return vol
Compute the Volume
object whose voxels contains the unsigned distance.
1533 def smooth_data(self, 1534 niter=10, relaxation_factor=0.1, strategy=0, mask=None, 1535 exclude=("Normals", "TextureCoordinates"), 1536 ) -> Self: 1537 """ 1538 Smooth point attribute data using distance weighted Laplacian kernel. 1539 1540 The effect is to blur regions of high variation and emphasize low variation regions. 1541 1542 Arguments: 1543 niter : (int) 1544 number of iterations 1545 relaxation_factor : (float) 1546 relaxation factor controlling the amount of Laplacian smoothing applied 1547 strategy : (int) 1548 strategy to use for Laplacian smoothing 1549 - 0: use all points, all point data attributes are smoothed 1550 - 1: smooth all point attribute data except those on the boundary 1551 - 2: only point data connected to a boundary point are smoothed 1552 mask : (str, np.ndarray) 1553 array to be used as a mask (ignore then the strategy keyword) 1554 exclude : (list) 1555 list of arrays to be excluded from smoothing 1556 """ 1557 try: 1558 saf = vtki.new("AttributeSmoothingFilter") 1559 except: 1560 vedo.logger.error("smooth_data() only avaialble in VTK>=9.3.0") 1561 return self 1562 saf.SetInputData(self.dataset) 1563 saf.SetRelaxationFactor(relaxation_factor) 1564 saf.SetNumberOfIterations(niter) 1565 1566 for ex in exclude: 1567 saf.AddExcludedArray(ex) 1568 1569 saf.SetWeightsTypeToDistance2() 1570 1571 saf.SetSmoothingStrategy(strategy) 1572 if mask is not None: 1573 saf.SetSmoothingStrategyToSmoothingMask() 1574 if isinstance(mask, str): 1575 mask_ = self.dataset.GetPointData().GetArray(mask) 1576 if not mask_: 1577 vedo.logger.error(f"smooth_data(): mask array {mask} not found") 1578 return self 1579 mask_array = vtki.vtkUnsignedCharArray() 1580 mask_array.ShallowCopy(mask_) 1581 mask_array.SetName(mask_.GetName()) 1582 else: 1583 mask_array = utils.numpy2vtk(mask, dtype=np.uint8) 1584 saf.SetSmoothingMask(mask_array) 1585 1586 saf.Update() 1587 1588 self._update(saf.GetOutput()) 1589 self.pipeline = utils.OperationNode( 1590 "smooth_data", comment=f"strategy {strategy}", parents=[self], c="#9e2a2b" 1591 ) 1592 return self
Smooth point attribute data using distance weighted Laplacian kernel.
The effect is to blur regions of high variation and emphasize low variation regions.
Arguments:
- niter : (int) number of iterations
- relaxation_factor : (float) relaxation factor controlling the amount of Laplacian smoothing applied
- strategy : (int) strategy to use for Laplacian smoothing - 0: use all points, all point data attributes are smoothed - 1: smooth all point attribute data except those on the boundary - 2: only point data connected to a boundary point are smoothed
- mask : (str, np.ndarray) array to be used as a mask (ignore then the strategy keyword)
- exclude : (list) list of arrays to be excluded from smoothing
1594 def compute_streamlines( 1595 self, 1596 seeds: Any, 1597 integrator="rk4", 1598 direction="forward", 1599 initial_step_size=None, 1600 max_propagation=None, 1601 max_steps=10000, 1602 step_length=0, 1603 surface_constrained=False, 1604 compute_vorticity=False, 1605 ) -> Union["vedo.Lines", None]: 1606 """ 1607 Integrate a vector field to generate streamlines. 1608 1609 Arguments: 1610 seeds : (Mesh, Points, list) 1611 starting points of the streamlines 1612 integrator : (str) 1613 type of integration method to be used: 1614 - "rk2" (Runge-Kutta 2) 1615 - "rk4" (Runge-Kutta 4) 1616 - "rk45" (Runge-Kutta 45) 1617 direction : (str) 1618 direction of integration, either "forward", "backward" or "both" 1619 initial_step_size : (float) 1620 initial step size used for line integration 1621 max_propagation : (float) 1622 maximum length of a streamline expressed in absolute units 1623 max_steps : (int) 1624 maximum number of steps for a streamline 1625 step_length : (float) 1626 maximum length of a step expressed in absolute units 1627 surface_constrained : (bool) 1628 whether to stop integrating when the streamline leaves the surface 1629 compute_vorticity : (bool) 1630 whether to compute the vorticity at each streamline point 1631 """ 1632 b = self.dataset.GetBounds() 1633 size = (b[5]-b[4] + b[3]-b[2] + b[1]-b[0]) / 3 1634 if initial_step_size is None: 1635 initial_step_size = size / 1000.0 1636 1637 if max_propagation is None: 1638 max_propagation = size * 2 1639 1640 if utils.is_sequence(seeds): 1641 seeds = vedo.Points(seeds) 1642 1643 sti = vtki.new("StreamTracer") 1644 sti.SetSourceData(seeds.dataset) 1645 if isinstance(self, vedo.RectilinearGrid): 1646 sti.SetInputData(vedo.UnstructuredGrid(self.dataset).dataset) 1647 else: 1648 sti.SetInputDataObject(self.dataset) 1649 1650 sti.SetInitialIntegrationStep(initial_step_size) 1651 sti.SetComputeVorticity(compute_vorticity) 1652 sti.SetMaximumNumberOfSteps(max_steps) 1653 sti.SetMaximumPropagation(max_propagation) 1654 sti.SetSurfaceStreamlines(surface_constrained) 1655 if step_length: 1656 sti.SetMaximumIntegrationStep(step_length) 1657 1658 if "for" in direction: 1659 sti.SetIntegrationDirectionToForward() 1660 elif "back" in direction: 1661 sti.SetIntegrationDirectionToBackward() 1662 elif "both" in direction: 1663 sti.SetIntegrationDirectionToBoth() 1664 else: 1665 vedo.logger.error(f"in compute_streamlines(), unknown direction {direction}") 1666 return None 1667 1668 if integrator == "rk2": 1669 sti.SetIntegratorTypeToRungeKutta2() 1670 elif integrator == "rk4": 1671 sti.SetIntegratorTypeToRungeKutta4() 1672 elif integrator == "rk45": 1673 sti.SetIntegratorTypeToRungeKutta45() 1674 else: 1675 vedo.logger.error(f"in compute_streamlines(), unknown integrator {integrator}") 1676 return None 1677 1678 sti.Update() 1679 1680 stlines = vedo.shapes.Lines(sti.GetOutput(), lw=4) 1681 stlines.name = "StreamLines" 1682 self.pipeline = utils.OperationNode( 1683 "compute_streamlines", comment=f"{integrator}", parents=[self, seeds], c="#9e2a2b" 1684 ) 1685 return stlines
Integrate a vector field to generate streamlines.
Arguments:
- seeds : (Mesh, Points, list) starting points of the streamlines
- integrator : (str) type of integration method to be used: - "rk2" (Runge-Kutta 2) - "rk4" (Runge-Kutta 4) - "rk45" (Runge-Kutta 45)
- direction : (str) direction of integration, either "forward", "backward" or "both"
- initial_step_size : (float) initial step size used for line integration
- max_propagation : (float) maximum length of a streamline expressed in absolute units
- max_steps : (int) maximum number of steps for a streamline
- step_length : (float) maximum length of a step expressed in absolute units
- surface_constrained : (bool) whether to stop integrating when the streamline leaves the surface
- compute_vorticity : (bool) whether to compute the vorticity at each streamline point
1688class PointAlgorithms(CommonAlgorithms): 1689 """Methods for point clouds.""" 1690 1691 def apply_transform(self, LT: Any, deep_copy=True) -> Self: 1692 """ 1693 Apply a linear or non-linear transformation to the mesh polygonal data. 1694 1695 Example: 1696 ```python 1697 from vedo import Cube, show, settings 1698 settings.use_parallel_projection = True 1699 c1 = Cube().rotate_z(25).pos(2,1).mirror().alpha(0.5) 1700 T = c1.transform # rotate by 5 degrees, place at (2,1) 1701 c2 = Cube().c('red4').wireframe().lw(10).lighting('off') 1702 c2.apply_transform(T) 1703 show(c1, c2, "The 2 cubes should overlap!", axes=1).close() 1704 ``` 1705 1706 ![](https://vedo.embl.es/images/feats/apply_transform.png) 1707 """ 1708 if self.dataset.GetNumberOfPoints() == 0: 1709 return self 1710 1711 if isinstance(LT, LinearTransform): 1712 LT_is_linear = True 1713 tr = LT.T 1714 if LT.is_identity(): 1715 return self 1716 1717 elif isinstance(LT, (vtki.vtkMatrix4x4, vtki.vtkLinearTransform)) or utils.is_sequence(LT): 1718 LT_is_linear = True 1719 LT = LinearTransform(LT) 1720 tr = LT.T 1721 if LT.is_identity(): 1722 return self 1723 1724 elif isinstance(LT, NonLinearTransform): 1725 LT_is_linear = False 1726 tr = LT.T 1727 self.transform = LT # reset 1728 1729 elif isinstance(LT, vtki.vtkThinPlateSplineTransform): 1730 LT_is_linear = False 1731 tr = LT 1732 self.transform = NonLinearTransform(LT) # reset 1733 1734 else: 1735 vedo.logger.error(f"apply_transform(), unknown input type:\n{LT}") 1736 return self 1737 1738 ################ 1739 if LT_is_linear: 1740 try: 1741 # self.transform might still not be linear 1742 self.transform.concatenate(LT) 1743 except AttributeError: 1744 # in that case reset it 1745 self.transform = LinearTransform() 1746 1747 ################ 1748 if isinstance(self.dataset, vtki.vtkPolyData): 1749 tp = vtki.new("TransformPolyDataFilter") 1750 elif isinstance(self.dataset, vtki.vtkUnstructuredGrid): 1751 tp = vtki.new("TransformFilter") 1752 tp.TransformAllInputVectorsOn() 1753 # elif isinstance(self.dataset, vtki.vtkImageData): 1754 # tp = vtki.new("ImageReslice") 1755 # tp.SetInterpolationModeToCubic() 1756 # tp.SetResliceTransform(tr) 1757 else: 1758 vedo.logger.error(f"apply_transform(), unknown input type: {[self.dataset]}") 1759 return self 1760 1761 tp.SetTransform(tr) 1762 tp.SetInputData(self.dataset) 1763 tp.Update() 1764 out = tp.GetOutput() 1765 1766 if deep_copy: 1767 self.dataset.DeepCopy(out) 1768 else: 1769 self.dataset.ShallowCopy(out) 1770 1771 # reset the locators 1772 self.point_locator = None 1773 self.cell_locator = None 1774 self.line_locator = None 1775 return self 1776 1777 def apply_transform_from_actor(self) -> LinearTransform: 1778 """ 1779 Apply the current transformation of the actor to the data. 1780 Useful when manually moving an actor (eg. when pressing "a"). 1781 Returns the `LinearTransform` object. 1782 1783 Note that this method is automatically called when the window is closed, 1784 or the interactor style is changed. 1785 """ 1786 M = self.actor.GetMatrix() 1787 self.apply_transform(M) 1788 iden = vtki.vtkMatrix4x4() 1789 self.actor.PokeMatrix(iden) 1790 return LinearTransform(M) 1791 1792 def pos(self, x=None, y=None, z=None) -> Self: 1793 """Set/Get object position.""" 1794 if x is None: # get functionality 1795 return self.transform.position 1796 1797 if z is None and y is None: # assume x is of the form (x,y,z) 1798 if len(x) == 3: 1799 x, y, z = x 1800 else: 1801 x, y = x 1802 z = 0 1803 elif z is None: # assume x,y is of the form x, y 1804 z = 0 1805 1806 q = self.transform.position 1807 delta = [x, y, z] - q 1808 if delta[0] == delta[1] == delta[2] == 0: 1809 return self 1810 LT = LinearTransform().translate(delta) 1811 return self.apply_transform(LT) 1812 1813 def shift(self, dx=0, dy=0, dz=0) -> Self: 1814 """Add a vector to the current object position.""" 1815 if utils.is_sequence(dx): 1816 dx, dy, dz = utils.make3d(dx) 1817 if dx == dy == dz == 0: 1818 return self 1819 LT = LinearTransform().translate([dx, dy, dz]) 1820 return self.apply_transform(LT) 1821 1822 def x(self, val=None) -> Self: 1823 """Set/Get object position along x axis.""" 1824 p = self.transform.position 1825 if val is None: 1826 return p[0] 1827 self.pos(val, p[1], p[2]) 1828 return self 1829 1830 def y(self, val=None)-> Self: 1831 """Set/Get object position along y axis.""" 1832 p = self.transform.position 1833 if val is None: 1834 return p[1] 1835 self.pos(p[0], val, p[2]) 1836 return self 1837 1838 def z(self, val=None) -> Self: 1839 """Set/Get object position along z axis.""" 1840 p = self.transform.position 1841 if val is None: 1842 return p[2] 1843 self.pos(p[0], p[1], val) 1844 return self 1845 1846 def rotate(self, angle: float, axis=(1, 0, 0), point=(0, 0, 0), rad=False) -> Self: 1847 """ 1848 Rotate around an arbitrary `axis` passing through `point`. 1849 1850 Example: 1851 ```python 1852 from vedo import * 1853 c1 = Cube() 1854 c2 = c1.clone().c('violet').alpha(0.5) # copy of c1 1855 v = vector(0.2,1,0) 1856 p = vector(1,0,0) # axis passes through this point 1857 c2.rotate(90, axis=v, point=p) 1858 l = Line(-v+p, v+p).lw(3).c('red') 1859 show(c1, l, c2, axes=1).close() 1860 ``` 1861 1862 ![](https://vedo.embl.es/images/feats/rotate_axis.png) 1863 """ 1864 LT = LinearTransform() 1865 LT.rotate(angle, axis, point, rad) 1866 return self.apply_transform(LT) 1867 1868 def rotate_x(self, angle: float, rad=False, around=None) -> Self: 1869 """ 1870 Rotate around x-axis. If angle is in radians set `rad=True`. 1871 1872 Use `around` to define a pivoting point. 1873 """ 1874 if angle == 0: 1875 return self 1876 LT = LinearTransform().rotate_x(angle, rad, around) 1877 return self.apply_transform(LT) 1878 1879 def rotate_y(self, angle: float, rad=False, around=None) -> Self: 1880 """ 1881 Rotate around y-axis. If angle is in radians set `rad=True`. 1882 1883 Use `around` to define a pivoting point. 1884 """ 1885 if angle == 0: 1886 return self 1887 LT = LinearTransform().rotate_y(angle, rad, around) 1888 return self.apply_transform(LT) 1889 1890 def rotate_z(self, angle: float, rad=False, around=None) -> Self: 1891 """ 1892 Rotate around z-axis. If angle is in radians set `rad=True`. 1893 1894 Use `around` to define a pivoting point. 1895 """ 1896 if angle == 0: 1897 return self 1898 LT = LinearTransform().rotate_z(angle, rad, around) 1899 return self.apply_transform(LT) 1900 1901 def reorient(self, initaxis, newaxis, rotation=0, rad=False, xyplane=False) -> Self: 1902 """ 1903 Reorient the object to point to a new direction from an initial one. 1904 If `initaxis` is None, the object will be assumed in its "default" orientation. 1905 If `xyplane` is True, the object will be rotated to lie on the xy plane. 1906 1907 Use `rotation` to first rotate the object around its `initaxis`. 1908 """ 1909 q = self.transform.position 1910 LT = LinearTransform() 1911 LT.reorient(initaxis, newaxis, q, rotation, rad, xyplane) 1912 return self.apply_transform(LT) 1913 1914 def scale(self, s=None, reset=False, origin=True) -> Union[Self, np.array]: 1915 """ 1916 Set/get object's scaling factor. 1917 1918 Arguments: 1919 s : (list, float) 1920 scaling factor(s). 1921 reset : (bool) 1922 if True previous scaling factors are ignored. 1923 origin : (bool) 1924 if True scaling is applied with respect to object's position, 1925 otherwise is applied respect to (0,0,0). 1926 1927 Note: 1928 use `s=(sx,sy,sz)` to scale differently in the three coordinates. 1929 """ 1930 if s is None: 1931 return np.array(self.transform.T.GetScale()) 1932 1933 if not utils.is_sequence(s): 1934 s = [s, s, s] 1935 1936 LT = LinearTransform() 1937 if reset: 1938 old_s = np.array(self.transform.T.GetScale()) 1939 LT.scale(s / old_s) 1940 else: 1941 if origin is True: 1942 LT.scale(s, origin=self.transform.position) 1943 elif origin is False: 1944 LT.scale(s, origin=False) 1945 else: 1946 LT.scale(s, origin=origin) 1947 1948 return self.apply_transform(LT)
Methods for point clouds.
1691 def apply_transform(self, LT: Any, deep_copy=True) -> Self: 1692 """ 1693 Apply a linear or non-linear transformation to the mesh polygonal data. 1694 1695 Example: 1696 ```python 1697 from vedo import Cube, show, settings 1698 settings.use_parallel_projection = True 1699 c1 = Cube().rotate_z(25).pos(2,1).mirror().alpha(0.5) 1700 T = c1.transform # rotate by 5 degrees, place at (2,1) 1701 c2 = Cube().c('red4').wireframe().lw(10).lighting('off') 1702 c2.apply_transform(T) 1703 show(c1, c2, "The 2 cubes should overlap!", axes=1).close() 1704 ``` 1705 1706 ![](https://vedo.embl.es/images/feats/apply_transform.png) 1707 """ 1708 if self.dataset.GetNumberOfPoints() == 0: 1709 return self 1710 1711 if isinstance(LT, LinearTransform): 1712 LT_is_linear = True 1713 tr = LT.T 1714 if LT.is_identity(): 1715 return self 1716 1717 elif isinstance(LT, (vtki.vtkMatrix4x4, vtki.vtkLinearTransform)) or utils.is_sequence(LT): 1718 LT_is_linear = True 1719 LT = LinearTransform(LT) 1720 tr = LT.T 1721 if LT.is_identity(): 1722 return self 1723 1724 elif isinstance(LT, NonLinearTransform): 1725 LT_is_linear = False 1726 tr = LT.T 1727 self.transform = LT # reset 1728 1729 elif isinstance(LT, vtki.vtkThinPlateSplineTransform): 1730 LT_is_linear = False 1731 tr = LT 1732 self.transform = NonLinearTransform(LT) # reset 1733 1734 else: 1735 vedo.logger.error(f"apply_transform(), unknown input type:\n{LT}") 1736 return self 1737 1738 ################ 1739 if LT_is_linear: 1740 try: 1741 # self.transform might still not be linear 1742 self.transform.concatenate(LT) 1743 except AttributeError: 1744 # in that case reset it 1745 self.transform = LinearTransform() 1746 1747 ################ 1748 if isinstance(self.dataset, vtki.vtkPolyData): 1749 tp = vtki.new("TransformPolyDataFilter") 1750 elif isinstance(self.dataset, vtki.vtkUnstructuredGrid): 1751 tp = vtki.new("TransformFilter") 1752 tp.TransformAllInputVectorsOn() 1753 # elif isinstance(self.dataset, vtki.vtkImageData): 1754 # tp = vtki.new("ImageReslice") 1755 # tp.SetInterpolationModeToCubic() 1756 # tp.SetResliceTransform(tr) 1757 else: 1758 vedo.logger.error(f"apply_transform(), unknown input type: {[self.dataset]}") 1759 return self 1760 1761 tp.SetTransform(tr) 1762 tp.SetInputData(self.dataset) 1763 tp.Update() 1764 out = tp.GetOutput() 1765 1766 if deep_copy: 1767 self.dataset.DeepCopy(out) 1768 else: 1769 self.dataset.ShallowCopy(out) 1770 1771 # reset the locators 1772 self.point_locator = None 1773 self.cell_locator = None 1774 self.line_locator = None 1775 return self
Apply a linear or non-linear transformation to the mesh polygonal data.
Example:
from vedo import Cube, show, settings
settings.use_parallel_projection = True
c1 = Cube().rotate_z(25).pos(2,1).mirror().alpha(0.5)
T = c1.transform # rotate by 5 degrees, place at (2,1)
c2 = Cube().c('red4').wireframe().lw(10).lighting('off')
c2.apply_transform(T)
show(c1, c2, "The 2 cubes should overlap!", axes=1).close()
1777 def apply_transform_from_actor(self) -> LinearTransform: 1778 """ 1779 Apply the current transformation of the actor to the data. 1780 Useful when manually moving an actor (eg. when pressing "a"). 1781 Returns the `LinearTransform` object. 1782 1783 Note that this method is automatically called when the window is closed, 1784 or the interactor style is changed. 1785 """ 1786 M = self.actor.GetMatrix() 1787 self.apply_transform(M) 1788 iden = vtki.vtkMatrix4x4() 1789 self.actor.PokeMatrix(iden) 1790 return LinearTransform(M)
Apply the current transformation of the actor to the data.
Useful when manually moving an actor (eg. when pressing "a").
Returns the LinearTransform
object.
Note that this method is automatically called when the window is closed, or the interactor style is changed.
1792 def pos(self, x=None, y=None, z=None) -> Self: 1793 """Set/Get object position.""" 1794 if x is None: # get functionality 1795 return self.transform.position 1796 1797 if z is None and y is None: # assume x is of the form (x,y,z) 1798 if len(x) == 3: 1799 x, y, z = x 1800 else: 1801 x, y = x 1802 z = 0 1803 elif z is None: # assume x,y is of the form x, y 1804 z = 0 1805 1806 q = self.transform.position 1807 delta = [x, y, z] - q 1808 if delta[0] == delta[1] == delta[2] == 0: 1809 return self 1810 LT = LinearTransform().translate(delta) 1811 return self.apply_transform(LT)
Set/Get object position.
1813 def shift(self, dx=0, dy=0, dz=0) -> Self: 1814 """Add a vector to the current object position.""" 1815 if utils.is_sequence(dx): 1816 dx, dy, dz = utils.make3d(dx) 1817 if dx == dy == dz == 0: 1818 return self 1819 LT = LinearTransform().translate([dx, dy, dz]) 1820 return self.apply_transform(LT)
Add a vector to the current object position.
1822 def x(self, val=None) -> Self: 1823 """Set/Get object position along x axis.""" 1824 p = self.transform.position 1825 if val is None: 1826 return p[0] 1827 self.pos(val, p[1], p[2]) 1828 return self
Set/Get object position along x axis.
1830 def y(self, val=None)-> Self: 1831 """Set/Get object position along y axis.""" 1832 p = self.transform.position 1833 if val is None: 1834 return p[1] 1835 self.pos(p[0], val, p[2]) 1836 return self
Set/Get object position along y axis.
1838 def z(self, val=None) -> Self: 1839 """Set/Get object position along z axis.""" 1840 p = self.transform.position 1841 if val is None: 1842 return p[2] 1843 self.pos(p[0], p[1], val) 1844 return self
Set/Get object position along z axis.
1846 def rotate(self, angle: float, axis=(1, 0, 0), point=(0, 0, 0), rad=False) -> Self: 1847 """ 1848 Rotate around an arbitrary `axis` passing through `point`. 1849 1850 Example: 1851 ```python 1852 from vedo import * 1853 c1 = Cube() 1854 c2 = c1.clone().c('violet').alpha(0.5) # copy of c1 1855 v = vector(0.2,1,0) 1856 p = vector(1,0,0) # axis passes through this point 1857 c2.rotate(90, axis=v, point=p) 1858 l = Line(-v+p, v+p).lw(3).c('red') 1859 show(c1, l, c2, axes=1).close() 1860 ``` 1861 1862 ![](https://vedo.embl.es/images/feats/rotate_axis.png) 1863 """ 1864 LT = LinearTransform() 1865 LT.rotate(angle, axis, point, rad) 1866 return self.apply_transform(LT)
Rotate around an arbitrary axis
passing through point
.
Example:
from vedo import *
c1 = Cube()
c2 = c1.clone().c('violet').alpha(0.5) # copy of c1
v = vector(0.2,1,0)
p = vector(1,0,0) # axis passes through this point
c2.rotate(90, axis=v, point=p)
l = Line(-v+p, v+p).lw(3).c('red')
show(c1, l, c2, axes=1).close()
1868 def rotate_x(self, angle: float, rad=False, around=None) -> Self: 1869 """ 1870 Rotate around x-axis. If angle is in radians set `rad=True`. 1871 1872 Use `around` to define a pivoting point. 1873 """ 1874 if angle == 0: 1875 return self 1876 LT = LinearTransform().rotate_x(angle, rad, around) 1877 return self.apply_transform(LT)
Rotate around x-axis. If angle is in radians set rad=True
.
Use around
to define a pivoting point.
1879 def rotate_y(self, angle: float, rad=False, around=None) -> Self: 1880 """ 1881 Rotate around y-axis. If angle is in radians set `rad=True`. 1882 1883 Use `around` to define a pivoting point. 1884 """ 1885 if angle == 0: 1886 return self 1887 LT = LinearTransform().rotate_y(angle, rad, around) 1888 return self.apply_transform(LT)
Rotate around y-axis. If angle is in radians set rad=True
.
Use around
to define a pivoting point.
1890 def rotate_z(self, angle: float, rad=False, around=None) -> Self: 1891 """ 1892 Rotate around z-axis. If angle is in radians set `rad=True`. 1893 1894 Use `around` to define a pivoting point. 1895 """ 1896 if angle == 0: 1897 return self 1898 LT = LinearTransform().rotate_z(angle, rad, around) 1899 return self.apply_transform(LT)
Rotate around z-axis. If angle is in radians set rad=True
.
Use around
to define a pivoting point.
1901 def reorient(self, initaxis, newaxis, rotation=0, rad=False, xyplane=False) -> Self: 1902 """ 1903 Reorient the object to point to a new direction from an initial one. 1904 If `initaxis` is None, the object will be assumed in its "default" orientation. 1905 If `xyplane` is True, the object will be rotated to lie on the xy plane. 1906 1907 Use `rotation` to first rotate the object around its `initaxis`. 1908 """ 1909 q = self.transform.position 1910 LT = LinearTransform() 1911 LT.reorient(initaxis, newaxis, q, rotation, rad, xyplane) 1912 return self.apply_transform(LT)
Reorient the object to point to a new direction from an initial one.
If initaxis
is None, the object will be assumed in its "default" orientation.
If xyplane
is True, the object will be rotated to lie on the xy plane.
Use rotation
to first rotate the object around its initaxis
.
1914 def scale(self, s=None, reset=False, origin=True) -> Union[Self, np.array]: 1915 """ 1916 Set/get object's scaling factor. 1917 1918 Arguments: 1919 s : (list, float) 1920 scaling factor(s). 1921 reset : (bool) 1922 if True previous scaling factors are ignored. 1923 origin : (bool) 1924 if True scaling is applied with respect to object's position, 1925 otherwise is applied respect to (0,0,0). 1926 1927 Note: 1928 use `s=(sx,sy,sz)` to scale differently in the three coordinates. 1929 """ 1930 if s is None: 1931 return np.array(self.transform.T.GetScale()) 1932 1933 if not utils.is_sequence(s): 1934 s = [s, s, s] 1935 1936 LT = LinearTransform() 1937 if reset: 1938 old_s = np.array(self.transform.T.GetScale()) 1939 LT.scale(s / old_s) 1940 else: 1941 if origin is True: 1942 LT.scale(s, origin=self.transform.position) 1943 elif origin is False: 1944 LT.scale(s, origin=False) 1945 else: 1946 LT.scale(s, origin=origin) 1947 1948 return self.apply_transform(LT)
Set/get object's scaling factor.
Arguments:
- s : (list, float) scaling factor(s).
- reset : (bool) if True previous scaling factors are ignored.
- origin : (bool) if True scaling is applied with respect to object's position, otherwise is applied respect to (0,0,0).
Note:
use
s=(sx,sy,sz)
to scale differently in the three coordinates.
Inherited Members
- CommonAlgorithms
- pointdata
- celldata
- metadata
- memory_address
- memory_size
- modified
- box
- update_dataset
- bounds
- xbounds
- ybounds
- zbounds
- diagonal_size
- average_size
- center_of_mass
- copy_data_from
- inputdata
- npoints
- nvertices
- ncells
- points
- cell_centers
- lines
- lines_as_flat_array
- mark_boundaries
- find_cells_in_bounds
- find_cells_along_line
- find_cells_along_plane
- keep_cell_types
- map_cells_to_points
- vertices
- coordinates
- cells_as_flat_array
- cells
- map_points_to_cells
- resample_data_from
- interpolate_data_from
- add_ids
- gradient
- divergence
- vorticity
- probe
- compute_cell_size
- generate_random_data
- integrate_data
- write
- tomesh
- signed_distance
- unsigned_distance
- smooth_data
- compute_streamlines
1952class VolumeAlgorithms(CommonAlgorithms): 1953 """Methods for Volume objects.""" 1954 1955 def bounds(self) -> np.ndarray: 1956 """ 1957 Get the object bounds. 1958 Returns a list in format `[xmin,xmax, ymin,ymax, zmin,zmax]`. 1959 """ 1960 # OVERRIDE CommonAlgorithms.bounds() which is too slow 1961 return np.array(self.dataset.GetBounds()) 1962 1963 def isosurface(self, value=None, flying_edges=False) -> "vedo.mesh.Mesh": 1964 """ 1965 Return an `Mesh` isosurface extracted from the `Volume` object. 1966 1967 Set `value` as single float or list of values to draw the isosurface(s). 1968 Use flying_edges for faster results (but sometimes can interfere with `smooth()`). 1969 1970 Examples: 1971 - [isosurfaces1.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/isosurfaces1.py) 1972 1973 ![](https://vedo.embl.es/images/volumetric/isosurfaces.png) 1974 """ 1975 scrange = self.dataset.GetScalarRange() 1976 1977 if flying_edges: 1978 cf = vtki.new("FlyingEdges3D") 1979 cf.InterpolateAttributesOn() 1980 else: 1981 cf = vtki.new("ContourFilter") 1982 cf.UseScalarTreeOn() 1983 1984 cf.SetInputData(self.dataset) 1985 cf.ComputeNormalsOn() 1986 1987 if utils.is_sequence(value): 1988 cf.SetNumberOfContours(len(value)) 1989 for i, t in enumerate(value): 1990 cf.SetValue(i, t) 1991 else: 1992 if value is None: 1993 value = (2 * scrange[0] + scrange[1]) / 3.0 1994 # print("automatic isosurface value =", value) 1995 cf.SetValue(0, value) 1996 1997 cf.Update() 1998 poly = cf.GetOutput() 1999 2000 out = vedo.mesh.Mesh(poly, c=None).phong() 2001 out.mapper.SetScalarRange(scrange[0], scrange[1]) 2002 2003 out.pipeline = utils.OperationNode( 2004 "isosurface", 2005 parents=[self], 2006 comment=f"#pts {out.dataset.GetNumberOfPoints()}", 2007 c="#4cc9f0:#e9c46a", 2008 ) 2009 return out 2010 2011 def isosurface_discrete(self, value=None, nsmooth=15) -> "vedo.mesh.Mesh": 2012 """ 2013 Create boundary/isocontour surfaces from a label map (e.g., a segmented image) using a threaded, 2014 3D version of the multiple objects/labels Surface Nets algorithm. 2015 The input is a 3D image (i.e., volume) where each voxel is labeled 2016 (integer labels are preferred to real values), and the output data is a polygonal mesh separating 2017 labeled regions / objects. 2018 (Note that on output each region [corresponding to a different segmented object] will share 2019 points/edges on a common boundary, i.e., two neighboring objects will share the boundary that separates them). 2020 2021 Arguments: 2022 value : (float, list) 2023 single value or list of values to draw the isosurface(s). 2024 nsmooth : (int) 2025 number of iterations of smoothing (0 means no smoothing). 2026 2027 Examples: 2028 - [isosurfaces2.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/isosurfaces2.py) 2029 """ 2030 if not utils.is_sequence(value): 2031 value = [value] 2032 2033 snets = vtki.new("SurfaceNets3D") 2034 snets.SetInputData(self.dataset) 2035 2036 if nsmooth: 2037 snets.SmoothingOn() 2038 snets.AutomaticSmoothingConstraintsOn() 2039 snets.GetSmoother().SetNumberOfIterations(nsmooth) 2040 # snets.GetSmoother().SetRelaxationFactor(relaxation_factor) 2041 # snets.GetSmoother().SetConstraintDistance(constraint_distance) 2042 else: 2043 snets.SmoothingOff() 2044 2045 for i, val in enumerate(value): 2046 snets.SetValue(i, val) 2047 snets.Update() 2048 snets.SetOutputMeshTypeToTriangles() 2049 snets.SetOutputStyleToBoundary() 2050 snets.Update() 2051 2052 out = vedo.mesh.Mesh(snets.GetOutput()) 2053 out.pipeline = utils.OperationNode( 2054 "isosurface_discrete", 2055 parents=[self], 2056 comment=f"#pts {out.dataset.GetNumberOfPoints()}", 2057 c="#4cc9f0:#e9c46a", 2058 ) 2059 return out 2060 2061 2062 def legosurface( 2063 self, 2064 vmin=None, 2065 vmax=None, 2066 invert=False, 2067 boundary=False, 2068 array_name="input_scalars", 2069 ) -> "vedo.mesh.Mesh": 2070 """ 2071 Represent an object - typically a `Volume` - as lego blocks (voxels). 2072 By default colors correspond to the volume's scalar. 2073 Returns an `Mesh` object. 2074 2075 Arguments: 2076 vmin : (float) 2077 the lower threshold, voxels below this value are not shown. 2078 vmax : (float) 2079 the upper threshold, voxels above this value are not shown. 2080 boundary : (bool) 2081 controls whether to include cells that are partially inside 2082 array_name : (int, str) 2083 name or index of the scalar array to be considered 2084 2085 Examples: 2086 - [legosurface.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/legosurface.py) 2087 2088 ![](https://vedo.embl.es/images/volumetric/56820682-da40e500-684c-11e9-8ea3-91cbcba24b3a.png) 2089 """ 2090 imp_dataset = vtki.new("ImplicitDataSet") 2091 imp_dataset.SetDataSet(self.dataset) 2092 window = vtki.new("ImplicitWindowFunction") 2093 window.SetImplicitFunction(imp_dataset) 2094 2095 srng = list(self.dataset.GetScalarRange()) 2096 if vmin is not None: 2097 srng[0] = vmin 2098 if vmax is not None: 2099 srng[1] = vmax 2100 tol = 0.00001 * (srng[1] - srng[0]) 2101 srng[0] -= tol 2102 srng[1] += tol 2103 window.SetWindowRange(srng) 2104 2105 extract = vtki.new("ExtractGeometry") 2106 extract.SetInputData(self.dataset) 2107 extract.SetImplicitFunction(window) 2108 extract.SetExtractInside(invert) 2109 extract.SetExtractBoundaryCells(boundary) 2110 extract.Update() 2111 2112 gf = vtki.new("GeometryFilter") 2113 gf.SetInputData(extract.GetOutput()) 2114 gf.Update() 2115 2116 m = vedo.mesh.Mesh(gf.GetOutput()).lw(0.1).flat() 2117 m.map_points_to_cells() 2118 m.celldata.select(array_name) 2119 2120 m.pipeline = utils.OperationNode( 2121 "legosurface", 2122 parents=[self], 2123 comment=f"array: {array_name}", 2124 c="#4cc9f0:#e9c46a", 2125 ) 2126 return m 2127 2128 def tomesh(self, fill=True, shrink=1.0) -> "vedo.mesh.Mesh": 2129 """ 2130 Build a polygonal Mesh from the current object. 2131 2132 If `fill=True`, the interior faces of all the cells are created. 2133 (setting a `shrink` value slightly smaller than the default 1.0 2134 can avoid flickering due to internal adjacent faces). 2135 2136 If `fill=False`, only the boundary faces will be generated. 2137 """ 2138 gf = vtki.new("GeometryFilter") 2139 if fill: 2140 sf = vtki.new("ShrinkFilter") 2141 sf.SetInputData(self.dataset) 2142 sf.SetShrinkFactor(shrink) 2143 sf.Update() 2144 gf.SetInputData(sf.GetOutput()) 2145 gf.Update() 2146 poly = gf.GetOutput() 2147 if shrink == 1.0: 2148 clean_poly = vtki.new("CleanPolyData") 2149 clean_poly.PointMergingOn() 2150 clean_poly.ConvertLinesToPointsOn() 2151 clean_poly.ConvertPolysToLinesOn() 2152 clean_poly.ConvertStripsToPolysOn() 2153 clean_poly.SetInputData(poly) 2154 clean_poly.Update() 2155 poly = clean_poly.GetOutput() 2156 else: 2157 gf.SetInputData(self.dataset) 2158 gf.Update() 2159 poly = gf.GetOutput() 2160 2161 msh = vedo.mesh.Mesh(poly).flat() 2162 msh.scalarbar = self.scalarbar 2163 lut = utils.ctf2lut(self) 2164 if lut: 2165 msh.mapper.SetLookupTable(lut) 2166 2167 msh.pipeline = utils.OperationNode( 2168 "tomesh", parents=[self], comment=f"fill={fill}", c="#9e2a2b:#e9c46a" 2169 ) 2170 return msh
Methods for Volume objects.
1955 def bounds(self) -> np.ndarray: 1956 """ 1957 Get the object bounds. 1958 Returns a list in format `[xmin,xmax, ymin,ymax, zmin,zmax]`. 1959 """ 1960 # OVERRIDE CommonAlgorithms.bounds() which is too slow 1961 return np.array(self.dataset.GetBounds())
Get the object bounds.
Returns a list in format [xmin,xmax, ymin,ymax, zmin,zmax]
.
1963 def isosurface(self, value=None, flying_edges=False) -> "vedo.mesh.Mesh": 1964 """ 1965 Return an `Mesh` isosurface extracted from the `Volume` object. 1966 1967 Set `value` as single float or list of values to draw the isosurface(s). 1968 Use flying_edges for faster results (but sometimes can interfere with `smooth()`). 1969 1970 Examples: 1971 - [isosurfaces1.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/isosurfaces1.py) 1972 1973 ![](https://vedo.embl.es/images/volumetric/isosurfaces.png) 1974 """ 1975 scrange = self.dataset.GetScalarRange() 1976 1977 if flying_edges: 1978 cf = vtki.new("FlyingEdges3D") 1979 cf.InterpolateAttributesOn() 1980 else: 1981 cf = vtki.new("ContourFilter") 1982 cf.UseScalarTreeOn() 1983 1984 cf.SetInputData(self.dataset) 1985 cf.ComputeNormalsOn() 1986 1987 if utils.is_sequence(value): 1988 cf.SetNumberOfContours(len(value)) 1989 for i, t in enumerate(value): 1990 cf.SetValue(i, t) 1991 else: 1992 if value is None: 1993 value = (2 * scrange[0] + scrange[1]) / 3.0 1994 # print("automatic isosurface value =", value) 1995 cf.SetValue(0, value) 1996 1997 cf.Update() 1998 poly = cf.GetOutput() 1999 2000 out = vedo.mesh.Mesh(poly, c=None).phong() 2001 out.mapper.SetScalarRange(scrange[0], scrange[1]) 2002 2003 out.pipeline = utils.OperationNode( 2004 "isosurface", 2005 parents=[self], 2006 comment=f"#pts {out.dataset.GetNumberOfPoints()}", 2007 c="#4cc9f0:#e9c46a", 2008 ) 2009 return out
Return an Mesh
isosurface extracted from the Volume
object.
Set value
as single float or list of values to draw the isosurface(s).
Use flying_edges for faster results (but sometimes can interfere with smooth()
).
Examples:
2011 def isosurface_discrete(self, value=None, nsmooth=15) -> "vedo.mesh.Mesh": 2012 """ 2013 Create boundary/isocontour surfaces from a label map (e.g., a segmented image) using a threaded, 2014 3D version of the multiple objects/labels Surface Nets algorithm. 2015 The input is a 3D image (i.e., volume) where each voxel is labeled 2016 (integer labels are preferred to real values), and the output data is a polygonal mesh separating 2017 labeled regions / objects. 2018 (Note that on output each region [corresponding to a different segmented object] will share 2019 points/edges on a common boundary, i.e., two neighboring objects will share the boundary that separates them). 2020 2021 Arguments: 2022 value : (float, list) 2023 single value or list of values to draw the isosurface(s). 2024 nsmooth : (int) 2025 number of iterations of smoothing (0 means no smoothing). 2026 2027 Examples: 2028 - [isosurfaces2.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/isosurfaces2.py) 2029 """ 2030 if not utils.is_sequence(value): 2031 value = [value] 2032 2033 snets = vtki.new("SurfaceNets3D") 2034 snets.SetInputData(self.dataset) 2035 2036 if nsmooth: 2037 snets.SmoothingOn() 2038 snets.AutomaticSmoothingConstraintsOn() 2039 snets.GetSmoother().SetNumberOfIterations(nsmooth) 2040 # snets.GetSmoother().SetRelaxationFactor(relaxation_factor) 2041 # snets.GetSmoother().SetConstraintDistance(constraint_distance) 2042 else: 2043 snets.SmoothingOff() 2044 2045 for i, val in enumerate(value): 2046 snets.SetValue(i, val) 2047 snets.Update() 2048 snets.SetOutputMeshTypeToTriangles() 2049 snets.SetOutputStyleToBoundary() 2050 snets.Update() 2051 2052 out = vedo.mesh.Mesh(snets.GetOutput()) 2053 out.pipeline = utils.OperationNode( 2054 "isosurface_discrete", 2055 parents=[self], 2056 comment=f"#pts {out.dataset.GetNumberOfPoints()}", 2057 c="#4cc9f0:#e9c46a", 2058 ) 2059 return out
Create boundary/isocontour surfaces from a label map (e.g., a segmented image) using a threaded, 3D version of the multiple objects/labels Surface Nets algorithm. The input is a 3D image (i.e., volume) where each voxel is labeled (integer labels are preferred to real values), and the output data is a polygonal mesh separating labeled regions / objects. (Note that on output each region [corresponding to a different segmented object] will share points/edges on a common boundary, i.e., two neighboring objects will share the boundary that separates them).
Arguments:
- value : (float, list) single value or list of values to draw the isosurface(s).
- nsmooth : (int) number of iterations of smoothing (0 means no smoothing).
Examples:
2062 def legosurface( 2063 self, 2064 vmin=None, 2065 vmax=None, 2066 invert=False, 2067 boundary=False, 2068 array_name="input_scalars", 2069 ) -> "vedo.mesh.Mesh": 2070 """ 2071 Represent an object - typically a `Volume` - as lego blocks (voxels). 2072 By default colors correspond to the volume's scalar. 2073 Returns an `Mesh` object. 2074 2075 Arguments: 2076 vmin : (float) 2077 the lower threshold, voxels below this value are not shown. 2078 vmax : (float) 2079 the upper threshold, voxels above this value are not shown. 2080 boundary : (bool) 2081 controls whether to include cells that are partially inside 2082 array_name : (int, str) 2083 name or index of the scalar array to be considered 2084 2085 Examples: 2086 - [legosurface.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/legosurface.py) 2087 2088 ![](https://vedo.embl.es/images/volumetric/56820682-da40e500-684c-11e9-8ea3-91cbcba24b3a.png) 2089 """ 2090 imp_dataset = vtki.new("ImplicitDataSet") 2091 imp_dataset.SetDataSet(self.dataset) 2092 window = vtki.new("ImplicitWindowFunction") 2093 window.SetImplicitFunction(imp_dataset) 2094 2095 srng = list(self.dataset.GetScalarRange()) 2096 if vmin is not None: 2097 srng[0] = vmin 2098 if vmax is not None: 2099 srng[1] = vmax 2100 tol = 0.00001 * (srng[1] - srng[0]) 2101 srng[0] -= tol 2102 srng[1] += tol 2103 window.SetWindowRange(srng) 2104 2105 extract = vtki.new("ExtractGeometry") 2106 extract.SetInputData(self.dataset) 2107 extract.SetImplicitFunction(window) 2108 extract.SetExtractInside(invert) 2109 extract.SetExtractBoundaryCells(boundary) 2110 extract.Update() 2111 2112 gf = vtki.new("GeometryFilter") 2113 gf.SetInputData(extract.GetOutput()) 2114 gf.Update() 2115 2116 m = vedo.mesh.Mesh(gf.GetOutput()).lw(0.1).flat() 2117 m.map_points_to_cells() 2118 m.celldata.select(array_name) 2119 2120 m.pipeline = utils.OperationNode( 2121 "legosurface", 2122 parents=[self], 2123 comment=f"array: {array_name}", 2124 c="#4cc9f0:#e9c46a", 2125 ) 2126 return m
Represent an object - typically a Volume
- as lego blocks (voxels).
By default colors correspond to the volume's scalar.
Returns an Mesh
object.
Arguments:
- vmin : (float) the lower threshold, voxels below this value are not shown.
- vmax : (float) the upper threshold, voxels above this value are not shown.
- boundary : (bool) controls whether to include cells that are partially inside
- array_name : (int, str) name or index of the scalar array to be considered
Examples:
2128 def tomesh(self, fill=True, shrink=1.0) -> "vedo.mesh.Mesh": 2129 """ 2130 Build a polygonal Mesh from the current object. 2131 2132 If `fill=True`, the interior faces of all the cells are created. 2133 (setting a `shrink` value slightly smaller than the default 1.0 2134 can avoid flickering due to internal adjacent faces). 2135 2136 If `fill=False`, only the boundary faces will be generated. 2137 """ 2138 gf = vtki.new("GeometryFilter") 2139 if fill: 2140 sf = vtki.new("ShrinkFilter") 2141 sf.SetInputData(self.dataset) 2142 sf.SetShrinkFactor(shrink) 2143 sf.Update() 2144 gf.SetInputData(sf.GetOutput()) 2145 gf.Update() 2146 poly = gf.GetOutput() 2147 if shrink == 1.0: 2148 clean_poly = vtki.new("CleanPolyData") 2149 clean_poly.PointMergingOn() 2150 clean_poly.ConvertLinesToPointsOn() 2151 clean_poly.ConvertPolysToLinesOn() 2152 clean_poly.ConvertStripsToPolysOn() 2153 clean_poly.SetInputData(poly) 2154 clean_poly.Update() 2155 poly = clean_poly.GetOutput() 2156 else: 2157 gf.SetInputData(self.dataset) 2158 gf.Update() 2159 poly = gf.GetOutput() 2160 2161 msh = vedo.mesh.Mesh(poly).flat() 2162 msh.scalarbar = self.scalarbar 2163 lut = utils.ctf2lut(self) 2164 if lut: 2165 msh.mapper.SetLookupTable(lut) 2166 2167 msh.pipeline = utils.OperationNode( 2168 "tomesh", parents=[self], comment=f"fill={fill}", c="#9e2a2b:#e9c46a" 2169 ) 2170 return msh
Build a polygonal Mesh from the current object.
If fill=True
, the interior faces of all the cells are created.
(setting a shrink
value slightly smaller than the default 1.0
can avoid flickering due to internal adjacent faces).
If fill=False
, only the boundary faces will be generated.
Inherited Members
- CommonAlgorithms
- pointdata
- celldata
- metadata
- memory_address
- memory_size
- modified
- box
- update_dataset
- xbounds
- ybounds
- zbounds
- diagonal_size
- average_size
- center_of_mass
- copy_data_from
- inputdata
- npoints
- nvertices
- ncells
- points
- cell_centers
- lines
- lines_as_flat_array
- mark_boundaries
- find_cells_in_bounds
- find_cells_along_line
- find_cells_along_plane
- keep_cell_types
- map_cells_to_points
- vertices
- coordinates
- cells_as_flat_array
- cells
- map_points_to_cells
- resample_data_from
- interpolate_data_from
- add_ids
- gradient
- divergence
- vorticity
- probe
- compute_cell_size
- generate_random_data
- integrate_data
- write
- signed_distance
- unsigned_distance
- smooth_data
- compute_streamlines