vedo.core
Base classes providing functionality to different vedo objects.
1#!/usr/bin/env python3 2# -*- coding: utf-8 -*- 3import numpy as np 4from typing import Self, List, Union, Any 5 6import vedo.vtkclasses as vtki 7 8import vedo 9from vedo import colors 10from vedo import utils 11from vedo.transformations import LinearTransform, NonLinearTransform 12 13 14__docformat__ = "google" 15 16__doc__ = """ 17Base classes providing functionality to different vedo objects. 18 19![](https://vedo.embl.es/images/feats/algorithms_illustration.png) 20""" 21 22__all__ = [ 23 "DataArrayHelper", 24 "CommonAlgorithms", 25 "PointAlgorithms", 26 "VolumeAlgorithms", 27] 28 29warnings = dict( 30 points_getter=( 31 "WARNING: points() is deprecated, use vertices instead. Change:\n" 32 " mesh.points() -> mesh.vertices\n" 33 " (silence this with vedo.core.warnings['points_getter']=False)" 34 ), 35 points_setter=( 36 "WARNING: points() is deprecated, use vertices instead. Change:\n" 37 " mesh.points([[x,y,z], ...]) -> mesh.vertices = [[x,y,z], ...]\n" 38 " (silence this with vedo.core.warnings['points_getter']=False)" 39 ), 40) 41 42############################################################################### 43class DataArrayHelper: 44 # Internal use only. 45 # Helper class to manage data associated to either 46 # points (or vertices) and cells (or faces). 
    def __init__(self, obj, association):
        # obj: the vedo object owning the dataset.
        # association: 0 = point data, 1 = cell data, 2 = field (meta) data.
        self.obj = obj
        self.association = association

    def __getitem__(self, key):
        """Return the array (as numpy) selected by name or index; `None` if absent."""
        if self.association == 0:
            data = self.obj.dataset.GetPointData()

        elif self.association == 1:
            data = self.obj.dataset.GetCellData()

        elif self.association == 2:
            data = self.obj.dataset.GetFieldData()

            # Field data may hold string arrays: return them as a plain list.
            varr = data.GetAbstractArray(key)
            if isinstance(varr, vtki.vtkStringArray):
                if isinstance(key, int):
                    key = data.GetArrayName(key)
                n = varr.GetNumberOfValues()
                narr = [varr.GetValue(i) for i in range(n)]
                return narr
                ###########

        else:
            raise RuntimeError()

        if isinstance(key, int):
            key = data.GetArrayName(key)

        arr = data.GetArray(key)
        if not arr:
            return None
        return utils.vtk2numpy(arr)

    def __setitem__(self, key, input_array):
        """Assign an array to the given name, making it active when appropriate."""

        if self.association == 0:
            data = self.obj.dataset.GetPointData()
            n = self.obj.dataset.GetNumberOfPoints()
            self.obj.mapper.SetScalarModeToUsePointData()

        elif self.association == 1:
            data = self.obj.dataset.GetCellData()
            n = self.obj.dataset.GetNumberOfCells()
            self.obj.mapper.SetScalarModeToUseCellData()

        elif self.association == 2:
            # Metadata (field data): no size constraint, strings allowed.
            data = self.obj.dataset.GetFieldData()
            if not utils.is_sequence(input_array):
                input_array = [input_array]

            if isinstance(input_array[0], str):  # NOTE: empty input would raise IndexError here
                varr = vtki.vtkStringArray()
                varr.SetName(key)
                varr.SetNumberOfComponents(1)
                varr.SetNumberOfTuples(len(input_array))
                for i, iarr in enumerate(input_array):
                    if isinstance(iarr, np.ndarray):
                        iarr = iarr.tolist()  # better format
                        # Note: a string k can be converted to numpy with
                        # import json; k = np.array(json.loads(k))
                    varr.InsertValue(i, str(iarr))
            else:
                try:
                    varr = utils.numpy2vtk(input_array, name=key)
                except TypeError as e:
                    vedo.logger.error(
                        f"cannot create metadata with input object:\n"
                        f"{input_array}"
                        f"\n\nAllowed content examples are:\n"
                        f"- flat list of strings ['a','b', 1, [1,2,3], ...]"
                        f" (first item must be a string in this case)\n"
                        f" hint: use k = np.array(json.loads(k)) to convert strings\n"
                        f"- numpy arrays of any shape"
                    )
                    raise e

            data.AddArray(varr)
            return  ############

        else:
            raise RuntimeError()

        # Point/cell data must match the number of points/cells exactly.
        if len(input_array) != n:
            vedo.logger.error(
                f"Error in point/cell data: length of input {len(input_array)}"
                f" != {n} nr. of elements"
            )
            raise RuntimeError()

        input_array = np.asarray(input_array)
        varr = utils.numpy2vtk(input_array, name=key)
        data.AddArray(varr)

        if len(input_array.shape) == 1:  # scalars
            data.SetActiveScalars(key)
            try:  # could be a volume mapper
                self.obj.mapper.SetScalarRange(data.GetScalars().GetRange())
            except AttributeError:
                pass
        elif len(input_array.shape) == 2 and input_array.shape[1] == 3:  # vectors
            if key.lower() == "normals":
                data.SetActiveNormals(key)
            else:
                data.SetActiveVectors(key)

    def keys(self) -> List[str]:
        """Return the list of available data array names"""
        if self.association == 0:
            data = self.obj.dataset.GetPointData()
        elif self.association == 1:
            data = self.obj.dataset.GetCellData()
        elif self.association == 2:
            data = self.obj.dataset.GetFieldData()
        arrnames = []
        for i in range(data.GetNumberOfArrays()):
            name = ""
            if self.association == 2:
                # field data can contain abstract (e.g. string) arrays
                name = data.GetAbstractArray(i).GetName()
            else:
                iarr = data.GetArray(i)
                if iarr:
                    name = iarr.GetName()
            if name:
                arrnames.append(name)
        return arrnames

    def items(self) -> List:
        """Return the list of available data array `(names, values)`."""
        if self.association == 0:
            data = self.obj.dataset.GetPointData()
        elif self.association == 1:
            data = self.obj.dataset.GetCellData()
        elif self.association == 2:
            data = self.obj.dataset.GetFieldData()
        arrnames = []
        for i in range(data.GetNumberOfArrays()):
            if self.association == 2:
                name = data.GetAbstractArray(i).GetName()
            else:
                name = data.GetArray(i).GetName()
            if name:
                arrnames.append((name, self[name]))
        return arrnames
range(data.GetNumberOfArrays()): 186 if self.association == 2: 187 name = data.GetAbstractArray(i).GetName() 188 else: 189 name = data.GetArray(i).GetName() 190 if name: 191 arrnames.append((name, self[name])) 192 return arrnames 193 194 def todict(self) -> dict: 195 """Return a dictionary of the available data arrays.""" 196 return dict(self.items()) 197 198 def rename(self, oldname: str, newname: str) -> None: 199 """Rename an array""" 200 if self.association == 0: 201 varr = self.obj.dataset.GetPointData().GetArray(oldname) 202 elif self.association == 1: 203 varr = self.obj.dataset.GetCellData().GetArray(oldname) 204 elif self.association == 2: 205 varr = self.obj.dataset.GetFieldData().GetAbstractArray(oldname) 206 if varr: 207 varr.SetName(newname) 208 else: 209 vedo.logger.warning( 210 f"Cannot rename non existing array {oldname} to {newname}" 211 ) 212 213 def remove(self, key: Union[int, str]) -> None: 214 """Remove a data array by name or number""" 215 if self.association == 0: 216 self.obj.dataset.GetPointData().RemoveArray(key) 217 elif self.association == 1: 218 self.obj.dataset.GetCellData().RemoveArray(key) 219 elif self.association == 2: 220 self.obj.dataset.GetFieldData().RemoveArray(key) 221 222 def clear(self) -> None: 223 """Remove all data associated to this object""" 224 if self.association == 0: 225 data = self.obj.dataset.GetPointData() 226 elif self.association == 1: 227 data = self.obj.dataset.GetCellData() 228 elif self.association == 2: 229 data = self.obj.dataset.GetFieldData() 230 for i in range(data.GetNumberOfArrays()): 231 if self.association == 2: 232 name = data.GetAbstractArray(i).GetName() 233 else: 234 name = data.GetArray(i).GetName() 235 data.RemoveArray(name) 236 237 def select(self, key: Union[int, str]) -> Any: 238 """Select one specific array by its name to make it the `active` one.""" 239 # Default (ColorModeToDefault): unsigned char scalars are treated as colors, 240 # and NOT mapped through the lookup table, while 
everything else is. 241 # ColorModeToDirectScalar extends ColorModeToDefault such that all integer 242 # types are treated as colors with values in the range 0-255 243 # and floating types are treated as colors with values in the range 0.0-1.0. 244 # Setting ColorModeToMapScalars means that all scalar data will be mapped 245 # through the lookup table. 246 # (Note that for multi-component scalars, the particular component 247 # to use for mapping can be specified using the SelectColorArray() method.) 248 if self.association == 0: 249 data = self.obj.dataset.GetPointData() 250 self.obj.mapper.SetScalarModeToUsePointData() 251 else: 252 data = self.obj.dataset.GetCellData() 253 self.obj.mapper.SetScalarModeToUseCellData() 254 255 if isinstance(key, int): 256 key = data.GetArrayName(key) 257 258 arr = data.GetArray(key) 259 if not arr: 260 return self.obj 261 262 nc = arr.GetNumberOfComponents() 263 # print("GetNumberOfComponents", nc) 264 if nc == 1: 265 data.SetActiveScalars(key) 266 elif nc == 2: 267 data.SetTCoords(arr) 268 elif nc in (3, 4): 269 if "rgb" in key.lower(): # type: ignore 270 data.SetActiveScalars(key) 271 try: 272 # could be a volume mapper 273 self.obj.mapper.SetColorModeToDirectScalars() 274 data.SetActiveVectors(None) # need this to fix bug in #1066 275 # print("SetColorModeToDirectScalars for", key) 276 except AttributeError: 277 pass 278 else: 279 data.SetActiveVectors(key) 280 elif nc == 9: 281 data.SetActiveTensors(key) 282 else: 283 vedo.logger.error(f"Cannot select array {key} with {nc} components") 284 return self.obj 285 286 try: 287 # could be a volume mapper 288 self.obj.mapper.SetArrayName(key) 289 self.obj.mapper.ScalarVisibilityOn() 290 except AttributeError: 291 pass 292 293 return self.obj 294 295 def select_texture_coords(self, key: Union[int,str]) -> Any: 296 """Select one specific array to be used as texture coordinates.""" 297 if self.association == 0: 298 data = self.obj.dataset.GetPointData() 299 else: 300 
vedo.logger.warning("texture coordinates are only available for point data") 301 return 302 303 if isinstance(key, int): 304 key = data.GetArrayName(key) 305 data.SetTCoords(data.GetArray(key)) 306 return self.obj 307 308 def select_normals(self, key: Union[int,str]) -> Any: 309 """Select one specific normal array by its name to make it the "active" one.""" 310 if self.association == 0: 311 data = self.obj.dataset.GetPointData() 312 self.obj.mapper.SetScalarModeToUsePointData() 313 else: 314 data = self.obj.dataset.GetCellData() 315 self.obj.mapper.SetScalarModeToUseCellData() 316 317 if isinstance(key, int): 318 key = data.GetArrayName(key) 319 data.SetActiveNormals(key) 320 return self.obj 321 322 def print(self, **kwargs) -> None: 323 """Print the array names available to terminal""" 324 colors.printc(self.keys(), **kwargs) 325 326 def __repr__(self) -> str: 327 """Representation""" 328 329 def _get_str(pd, header): 330 out = f"\x1b[2m\x1b[1m\x1b[7m{header}" 331 if pd.GetNumberOfArrays(): 332 if self.obj.name: 333 out += f" in {self.obj.name}" 334 out += f" contains {pd.GetNumberOfArrays()} array(s)\x1b[0m" 335 for i in range(pd.GetNumberOfArrays()): 336 varr = pd.GetArray(i) 337 out += f"\n\x1b[1m\x1b[4mArray name : {varr.GetName()}\x1b[0m" 338 out += "\nindex".ljust(15) + f": {i}" 339 t = varr.GetDataType() 340 if t in vtki.array_types: 341 out += "\ntype".ljust(15) 342 out += f": {vtki.array_types[t]}" 343 shape = (varr.GetNumberOfTuples(), varr.GetNumberOfComponents()) 344 out += "\nshape".ljust(15) + f": {shape}" 345 out += "\nrange".ljust(15) + f": {np.array(varr.GetRange())}" 346 out += "\nmax id".ljust(15) + f": {varr.GetMaxId()}" 347 out += "\nlook up table".ljust(15) + f": {bool(varr.GetLookupTable())}" 348 out += "\nin-memory size".ljust(15) + f": {varr.GetActualMemorySize()} KB" 349 else: 350 out += " is empty.\x1b[0m" 351 return out 352 353 if self.association == 0: 354 out = _get_str(self.obj.dataset.GetPointData(), "Point Data") 355 elif 
self.association == 1: 356 out = _get_str(self.obj.dataset.GetCellData(), "Cell Data") 357 elif self.association == 2: 358 pd = self.obj.dataset.GetFieldData() 359 if pd.GetNumberOfArrays(): 360 out = "\x1b[2m\x1b[1m\x1b[7mMeta Data" 361 if self.obj.name: 362 out += f" in {self.obj.name}" 363 out += f" contains {pd.GetNumberOfArrays()} entries\x1b[0m" 364 for i in range(pd.GetNumberOfArrays()): 365 varr = pd.GetAbstractArray(i) 366 out += f"\n\x1b[1m\x1b[4mEntry name : {varr.GetName()}\x1b[0m" 367 out += "\nindex".ljust(15) + f": {i}" 368 shape = (varr.GetNumberOfTuples(), varr.GetNumberOfComponents()) 369 out += "\nshape".ljust(15) + f": {shape}" 370 371 return out 372 373 374############################################################################### 375class CommonAlgorithms: 376 """Common algorithms.""" 377 378 @property 379 def pointdata(self): 380 """ 381 Create and/or return a `numpy.array` associated to points (vertices). 382 A data array can be indexed either as a string or by an integer number. 383 E.g.: `myobj.pointdata["arrayname"]` 384 385 Usage: 386 387 `myobj.pointdata.keys()` to return the available data array names 388 389 `myobj.pointdata.select(name)` to make this array the active one 390 391 `myobj.pointdata.remove(name)` to remove this array 392 """ 393 return DataArrayHelper(self, 0) 394 395 @property 396 def celldata(self): 397 """ 398 Create and/or return a `numpy.array` associated to cells (faces). 399 A data array can be indexed either as a string or by an integer number. 400 E.g.: `myobj.celldata["arrayname"]` 401 402 Usage: 403 404 `myobj.celldata.keys()` to return the available data array names 405 406 `myobj.celldata.select(name)` to make this array the active one 407 408 `myobj.celldata.remove(name)` to remove this array 409 """ 410 return DataArrayHelper(self, 1) 411 412 @property 413 def metadata(self): 414 """ 415 Create and/or return a `numpy.array` associated to neither cells nor faces. 
416 A data array can be indexed either as a string or by an integer number. 417 E.g.: `myobj.metadata["arrayname"]` 418 419 Usage: 420 421 `myobj.metadata.keys()` to return the available data array names 422 423 `myobj.metadata.select(name)` to make this array the active one 424 425 `myobj.metadata.remove(name)` to remove this array 426 """ 427 return DataArrayHelper(self, 2) 428 429 def memory_address(self) -> int: 430 """ 431 Return a unique memory address integer which may serve as the ID of the 432 object, or passed to c++ code. 433 """ 434 # https://www.linkedin.com/pulse/speedup-your-code-accessing-python-vtk-objects-from-c-pletzer/ 435 # https://github.com/tfmoraes/polydata_connectivity 436 return int(self.dataset.GetAddressAsString("")[5:], 16) 437 438 def memory_size(self) -> int: 439 """Return the size in bytes of the object in memory.""" 440 return self.dataset.GetActualMemorySize() 441 442 def modified(self) -> Self: 443 """Use in conjunction with `tonumpy()` to update any modifications to the image array.""" 444 self.dataset.GetPointData().Modified() 445 scals = self.dataset.GetPointData().GetScalars() 446 if scals: 447 scals.Modified() 448 return self 449 450 def box(self, scale=1, padding=0) -> "vedo.Mesh": 451 """ 452 Return the bounding box as a new `Mesh` object. 
453 454 Arguments: 455 scale : (float) 456 box size can be scaled by a factor 457 padding : (float, list) 458 a constant padding can be added (can be a list `[padx,pady,padz]`) 459 """ 460 b = self.bounds() 461 if not utils.is_sequence(padding): 462 padding = [padding, padding, padding] 463 length, width, height = b[1] - b[0], b[3] - b[2], b[5] - b[4] 464 tol = (length + width + height) / 30000 # useful for boxing text 465 pos = [(b[0] + b[1]) / 2, (b[3] + b[2]) / 2, (b[5] + b[4]) / 2 - tol] 466 bx = vedo.shapes.Box( 467 pos, 468 length * scale + padding[0], 469 width * scale + padding[1], 470 height * scale + padding[2], 471 c="gray", 472 ) 473 try: 474 pr = vtki.vtkProperty() 475 pr.DeepCopy(self.properties) 476 bx.actor.SetProperty(pr) 477 bx.properties = pr 478 except (AttributeError, TypeError): 479 pass 480 bx.flat().lighting("off").wireframe(True) 481 return bx 482 483 def update_dataset(self, dataset, **kwargs) -> Self: 484 """Update the dataset of the object with the provided VTK dataset.""" 485 self._update(dataset, **kwargs) 486 return self 487 488 def bounds(self) -> np.ndarray: 489 """ 490 Get the object bounds. 491 Returns a list in format `[xmin,xmax, ymin,ymax, zmin,zmax]`. 492 """ 493 try: # this is very slow for large meshes 494 pts = self.vertices 495 xmin, ymin, zmin = np.min(pts, axis=0) 496 xmax, ymax, zmax = np.max(pts, axis=0) 497 return np.array([xmin, xmax, ymin, ymax, zmin, zmax]) 498 except (AttributeError, ValueError): 499 return np.array(self.dataset.GetBounds()) 500 501 def xbounds(self, i=None) -> np.ndarray: 502 """Get the bounds `[xmin,xmax]`. Can specify upper or lower with i (0,1).""" 503 b = self.bounds() 504 if i is not None: 505 return b[i] 506 return np.array([b[0], b[1]]) 507 508 def ybounds(self, i=None) -> np.ndarray: 509 """Get the bounds `[ymin,ymax]`. 
Can specify upper or lower with i (0,1).""" 510 b = self.bounds() 511 if i == 0: 512 return b[2] 513 if i == 1: 514 return b[3] 515 return np.array([b[2], b[3]]) 516 517 def zbounds(self, i=None) -> np.ndarray: 518 """Get the bounds `[zmin,zmax]`. Can specify upper or lower with i (0,1).""" 519 b = self.bounds() 520 if i == 0: 521 return b[4] 522 if i == 1: 523 return b[5] 524 return np.array([b[4], b[5]]) 525 526 def diagonal_size(self) -> float: 527 """Get the length of the diagonal of the bounding box.""" 528 b = self.bounds() 529 return np.sqrt((b[1] - b[0])**2 + (b[3] - b[2])**2 + (b[5] - b[4])**2) 530 531 def average_size(self) -> float: 532 """ 533 Calculate and return the average size of the object. 534 This is the mean of the vertex distances from the center of mass. 535 """ 536 coords = self.vertices 537 cm = np.mean(coords, axis=0) 538 if coords.shape[0] == 0: 539 return 0.0 540 cc = coords - cm 541 return np.mean(np.linalg.norm(cc, axis=1)) 542 543 def center_of_mass(self) -> np.ndarray: 544 """Get the center of mass of the object.""" 545 if isinstance(self, (vedo.RectilinearGrid, vedo.Volume)): 546 return np.array(self.dataset.GetCenter()) 547 cmf = vtki.new("CenterOfMass") 548 cmf.SetInputData(self.dataset) 549 cmf.Update() 550 c = cmf.GetCenter() 551 return np.array(c) 552 553 def copy_data_from(self, obj: Any) -> Self: 554 """Copy all data (point and cell data) from this input object""" 555 self.dataset.GetPointData().PassData(obj.dataset.GetPointData()) 556 self.dataset.GetCellData().PassData(obj.dataset.GetCellData()) 557 self.pipeline = utils.OperationNode( 558 "copy_data_from", 559 parents=[self, obj], 560 comment=f"{obj.__class__.__name__}", 561 shape="note", 562 c="#ccc5b9", 563 ) 564 return self 565 566 def inputdata(self): 567 """Obsolete, use `.dataset` instead.""" 568 colors.printc("WARNING: 'inputdata()' is obsolete, use '.dataset' instead.", c="y") 569 return self.dataset 570 571 @property 572 def npoints(self): 573 """Retrieve the 
    @property
    def nvertices(self):
        """Retrieve the number of vertices (or points)."""
        return self.dataset.GetNumberOfPoints()

    @property
    def ncells(self):
        """Retrieve the number of cells."""
        return self.dataset.GetNumberOfCells()

    def points(self, pts=None):
        """Obsolete, use `self.vertices` or `self.coordinates` instead."""
        if pts is None:  ### getter

            # print the deprecation notice only once per session
            if warnings["points_getter"]:
                colors.printc(warnings["points_getter"], c="y")
                warnings["points_getter"] = ""
            return self.vertices

        else:  ### setter

            if warnings["points_setter"]:
                colors.printc(warnings["points_setter"], c="y")
                warnings["points_setter"] = ""

            pts = np.asarray(pts, dtype=np.float32)

            if pts.ndim == 1:
                ### getter by point index ###################
                # a 1D input is interpreted as a list of point indices to fetch
                indices = pts.astype(int)
                vpts = self.dataset.GetPoints()
                arr = utils.vtk2numpy(vpts.GetData())
                return arr[indices]  ###########

            ### setter ####################################
            if pts.shape[1] == 2:
                # lift 2D points to 3D by appending a zero z-coordinate
                pts = np.c_[pts, np.zeros(pts.shape[0], dtype=np.float32)]
            arr = utils.numpy2vtk(pts, dtype=np.float32)

            vpts = self.dataset.GetPoints()
            vpts.SetData(arr)
            vpts.Modified()
            # reset mesh to identity matrix position/rotation:
            self.point_locator = None
            self.cell_locator = None
            self.line_locator = None
            self.transform = LinearTransform()
            return self

    @property
    def cell_centers(self):
        """
        Get the coordinates of the cell centers.

        Examples:
            - [delaunay2d.py](https://github.com/marcomusy/vedo/tree/master/examples/basic/delaunay2d.py)

        See also: `CellCenters()`.
        """
        vcen = vtki.new("CellCenters")
        vcen.CopyArraysOff()
        vcen.SetInputData(self.dataset)
        vcen.Update()
        return utils.vtk2numpy(vcen.GetOutput().GetPoints().GetData())

    @property
    def lines(self):
        """
        Get lines connectivity ids as a python array
        formatted as `[[id0,id1], [id3,id4], ...]`

        See also: `lines_as_flat_array()`.
        """
        # Get cell connectivity ids as a 1D array. The vtk format is:
        # [nids1, id0 ... idn, niids2, id0 ... idm, etc].
        try:
            arr1d = utils.vtk2numpy(self.dataset.GetLines().GetData())
        except AttributeError:
            return np.array([])
        i = 0
        conn = []
        n = len(arr1d)
        for _ in range(n):
            cell = [arr1d[i + k + 1] for k in range(arr1d[i])]
            conn.append(cell)
            i += arr1d[i] + 1
            if i >= n:
                break

        return conn  # cannot always make a numpy array of it!

    @property
    def lines_as_flat_array(self):
        """
        Get lines connectivity ids as a 1D numpy array.
        Format is e.g. [2,  10,20,  3, 10,11,12,  2, 70,80, ...]

        See also: `lines()`.
        """
        try:
            return utils.vtk2numpy(self.dataset.GetLines().GetData())
        except AttributeError:
            return np.array([])

    def mark_boundaries(self) -> Self:
        """
        Mark cells and vertices if they lie on a boundary.
        A new array called `BoundaryCells` is added to the object.
        """
        mb = vtki.new("MarkBoundaryFilter")
        mb.SetInputData(self.dataset)
        mb.Update()
        self.dataset.DeepCopy(mb.GetOutput())
        self.pipeline = utils.OperationNode("mark_boundaries", parents=[self])
        return self

    def find_cells_in_bounds(self, xbounds=(), ybounds=(), zbounds=()) -> np.ndarray:
        """
        Find cells that are within the specified bounds.
        """
        # a vedo/vtk object with a .bounds() method can be passed as xbounds
        try:
            xbounds = list(xbounds.bounds())
        except AttributeError:
            pass

        if len(xbounds) == 6:
            bnds = xbounds
        else:
            bnds = list(self.bounds())
            if len(xbounds) == 2:
                bnds[0] = xbounds[0]
                bnds[1] = xbounds[1]
            if len(ybounds) == 2:
                bnds[2] = ybounds[0]
                bnds[3] = ybounds[1]
            if len(zbounds) == 2:
                bnds[4] = zbounds[0]
                bnds[5] = zbounds[1]

        cell_ids = vtki.vtkIdList()
        # lazily build (and cache) the cell locator
        if not self.cell_locator:
            self.cell_locator = vtki.new("CellTreeLocator")
            self.cell_locator.SetDataSet(self.dataset)
            self.cell_locator.BuildLocator()
        self.cell_locator.FindCellsWithinBounds(bnds, cell_ids)
        cids = []
        for i in range(cell_ids.GetNumberOfIds()):
            cid = cell_ids.GetId(i)
            cids.append(cid)
        return np.array(cids)

    def find_cells_along_line(self, p0, p1, tol=0.001) -> np.ndarray:
        """
        Find cells that are intersected by a line segment.
        """
        cell_ids = vtki.vtkIdList()
        if not self.cell_locator:
            self.cell_locator = vtki.new("CellTreeLocator")
            self.cell_locator.SetDataSet(self.dataset)
            self.cell_locator.BuildLocator()
        self.cell_locator.FindCellsAlongLine(p0, p1, tol, cell_ids)
        cids = []
        for i in range(cell_ids.GetNumberOfIds()):
            cid = cell_ids.GetId(i)
            cids.append(cid)
        return np.array(cids)

    def find_cells_along_plane(self, origin, normal, tol=0.001) -> np.ndarray:
        """
        Find cells that are intersected by a plane.
        """
        cell_ids = vtki.vtkIdList()
        if not self.cell_locator:
            self.cell_locator = vtki.new("CellTreeLocator")
            self.cell_locator.SetDataSet(self.dataset)
            self.cell_locator.BuildLocator()
        self.cell_locator.FindCellsAlongPlane(origin, normal, tol, cell_ids)
        cids = []
        for i in range(cell_ids.GetNumberOfIds()):
            cid = cell_ids.GetId(i)
            cids.append(cid)
        return np.array(cids)

    def keep_cell_types(self, types=()):
        """
        Extract cells of a specific type.

        Check the VTK cell types here:
        https://vtk.org/doc/nightly/html/vtkCellType_8h.html
        """
        fe = vtki.new("ExtractCellsByType")
        fe.SetInputData(self.dataset)
        for t in types:
            try:
                if utils.is_integer(t):
                    it = t
                else:
                    # allow passing cell type names as strings
                    it = vtki.cell_types[t.upper()]
            except KeyError:
                vedo.logger.error(f"Cell type '{t}' not recognized")
                continue
            fe.AddCellType(it)
        fe.Update()
        self._update(fe.GetOutput())
        return self

    def map_cells_to_points(self, arrays=(), move=False) -> Self:
        """
        Interpolate cell data (i.e., data specified per cell or face)
        into point data (i.e., data specified at each vertex).
        The method of transformation is based on averaging the data values
        of all cells using a particular point.

        A custom list of arrays to be mapped can be passed in input.

        Set `move=True` to delete the original `celldata` array.
        """
        c2p = vtki.new("CellDataToPointData")
        c2p.SetInputData(self.dataset)
        if not move:
            c2p.PassCellDataOn()
        if arrays:
            c2p.ClearCellDataArrays()
            c2p.ProcessAllArraysOff()
            for arr in arrays:
                c2p.AddCellDataArray(arr)
        else:
            c2p.ProcessAllArraysOn()
        c2p.Update()
        self._update(c2p.GetOutput(), reset_locators=False)
        self.mapper.SetScalarModeToUsePointData()
        self.pipeline = utils.OperationNode("map_cells_to_points", parents=[self])
        return self

    @property
    def vertices(self):
        """Return the vertices (points) coordinates."""
        try:
            # for polydata and unstructured grid
            varr = self.dataset.GetPoints().GetData()
        except (AttributeError, TypeError):
            try:
                # for RectilinearGrid, StructuredGrid
                vpts = vtki.vtkPoints()
                self.dataset.GetPoints(vpts)
                varr = vpts.GetData()
            except (AttributeError, TypeError):
                try:
                    # for ImageData
                    v2p = vtki.new("ImageToPoints")
                    v2p.SetInputData(self.dataset)
                    v2p.Update()
                    varr = v2p.GetOutput().GetPoints().GetData()
                except AttributeError:
                    return np.array([])

        return utils.vtk2numpy(varr)
v2p.GetOutput().GetPoints().GetData() 829 except AttributeError: 830 return np.array([]) 831 832 return utils.vtk2numpy(varr) 833 834 # setter 835 @vertices.setter 836 def vertices(self, pts): 837 """Set vertices (points) coordinates.""" 838 pts = utils.make3d(pts) 839 arr = utils.numpy2vtk(pts, dtype=np.float32) 840 try: 841 vpts = self.dataset.GetPoints() 842 vpts.SetData(arr) 843 vpts.Modified() 844 except (AttributeError, TypeError): 845 vedo.logger.error(f"Cannot set vertices for {type(self)}") 846 return self 847 # reset mesh to identity matrix position/rotation: 848 self.point_locator = None 849 self.cell_locator = None 850 self.line_locator = None 851 self.transform = LinearTransform() 852 853 @property 854 def coordinates(self): 855 """Return the vertices (points) coordinates. Same as `vertices`.""" 856 return self.vertices 857 858 @coordinates.setter 859 def coordinates(self, pts): 860 """Set vertices (points) coordinates. Same as `vertices`.""" 861 self.vertices = pts 862 863 @property 864 def cells_as_flat_array(self): 865 """ 866 Get cell connectivity ids as a 1D numpy array. 867 Format is e.g. [3, 10,20,30 4, 10,11,12,13 ...] 868 """ 869 try: 870 # valid for unstructured grid 871 arr1d = utils.vtk2numpy(self.dataset.GetCells().GetData()) 872 except AttributeError: 873 # valid for polydata 874 arr1d = utils.vtk2numpy(self.dataset.GetPolys().GetData()) 875 return arr1d 876 877 @property 878 def cells(self): 879 """ 880 Get the cells connectivity ids as a numpy array. 881 882 The output format is: `[[id0 ... idn], [id0 ... idm], etc]`. 883 """ 884 try: 885 # valid for unstructured grid 886 arr1d = utils.vtk2numpy(self.dataset.GetCells().GetData()) 887 except AttributeError: 888 try: 889 # valid for polydata 890 arr1d = utils.vtk2numpy(self.dataset.GetPolys().GetData()) 891 except AttributeError: 892 vedo.logger.warning(f"Cannot get cells for {type(self)}") 893 return np.array([]) 894 895 # Get cell connettivity ids as a 1D array. 
vtk format is: 896 # [nids1, id0 ... idn, niids2, id0 ... idm, etc]. 897 i = 0 898 conn = [] 899 n = len(arr1d) 900 if n: 901 while True: 902 cell = [arr1d[i + k] for k in range(1, arr1d[i] + 1)] 903 conn.append(cell) 904 i += arr1d[i] + 1 905 if i >= n: 906 break 907 return conn 908 909 def map_points_to_cells(self, arrays=(), move=False) -> Self: 910 """ 911 Interpolate point data (i.e., data specified per point or vertex) 912 into cell data (i.e., data specified per cell). 913 The method of transformation is based on averaging the data values 914 of all points defining a particular cell. 915 916 A custom list of arrays to be mapped can be passed in input. 917 918 Set `move=True` to delete the original `pointdata` array. 919 920 Examples: 921 - [mesh_map2cell.py](https://github.com/marcomusy/vedo/tree/master/examples/basic/mesh_map2cell.py) 922 """ 923 p2c = vtki.new("PointDataToCellData") 924 p2c.SetInputData(self.dataset) 925 if not move: 926 p2c.PassPointDataOn() 927 if arrays: 928 p2c.ClearPointDataArrays() 929 p2c.ProcessAllArraysOff() 930 for arr in arrays: 931 p2c.AddPointDataArray(arr) 932 else: 933 p2c.ProcessAllArraysOn() 934 p2c.Update() 935 self._update(p2c.GetOutput(), reset_locators=False) 936 self.mapper.SetScalarModeToUseCellData() 937 self.pipeline = utils.OperationNode("map_points_to_cells", parents=[self]) 938 return self 939 940 def resample_data_from(self, source, tol=None, categorical=False) -> Self: 941 """ 942 Resample point and cell data from another dataset. 943 The output has the same structure but its point data have 944 the resampled values from target. 945 946 Use `tol` to set the tolerance used to compute whether 947 a point in the source is in a cell of the current object. 948 Points without resampled values, and their cells, are marked as blank. 949 If the data is categorical, then the resulting data will be determined 950 by a nearest neighbor interpolation scheme. 
        Example:
            ```python
            from vedo import *
            m1 = Mesh(dataurl+'bunny.obj')#.add_gaussian_noise(0.1)
            pts = m1.vertices
            ces = m1.cell_centers
            m1.pointdata["xvalues"] = np.power(pts[:,0], 3)
            m1.celldata["yvalues"] = np.power(ces[:,1], 3)
            m2 = Mesh(dataurl+'bunny.obj')
            m2.resample_data_from(m1)
            # print(m2.pointdata["xvalues"])
            show(m1, m2 , N=2, axes=1)
            ```
        """
        rs = vtki.new("ResampleWithDataSet")
        rs.SetInputData(self.dataset)
        rs.SetSourceData(source.dataset)

        rs.SetPassPointArrays(True)
        rs.SetPassCellArrays(True)
        rs.SetPassFieldArrays(True)
        rs.SetCategoricalData(categorical)

        rs.SetComputeTolerance(True)
        if tol:
            # a user-provided tolerance overrides the automatically computed one
            rs.SetComputeTolerance(False)
            rs.SetTolerance(tol)
        rs.Update()
        self._update(rs.GetOutput(), reset_locators=False)
        self.pipeline = utils.OperationNode(
            "resample_data_from",
            comment=f"{source.__class__.__name__}",
            parents=[self, source],
        )
        return self

    def interpolate_data_from(
        self,
        source,
        radius=None,
        n=None,
        kernel="shepard",
        exclude=("Normals",),
        on="points",
        null_strategy=1,
        null_value=0,
    ) -> Self:
        """
        Interpolate over source to port its data onto the current object using various kernels.

        If n (number of closest points to use) is set then radius value is ignored.

        Check out also:
            `probe()` which in many cases can be faster.

        Arguments:
            source : (object)
                the vedo object to interpolate the data from
            radius : (float)
                radius of the local search used by the kernel
            n : (int)
                number of closest points to use (overrides `radius`)
            kernel : (str)
                available kernels are [shepard, gaussian, linear]
            exclude : (list)
                list of array names to be excluded from the interpolation
            on : (str)
                interpolate either the "points" or the "cells" data of source
            null_strategy : (int)
                specify a strategy to use when encountering a "null" point
                during the interpolation process. Null points occur when the local neighborhood
                (of nearby points to interpolate from) is empty.

                - Case 0: an output array is created that marks points
                  as being valid (=1) or null (invalid =0), and the null_value is set as well
                - Case 1: the output data value(s) are set to the provided null_value
                - Case 2: simply use the closest point to perform the interpolation.
            null_value : (float)
                see above.

        Examples:
            - [interpolate_scalar1.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/interpolate_scalar1.py)
            - [interpolate_scalar3.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/interpolate_scalar3.py)
            - [interpolate_scalar4.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/interpolate_scalar4.py)
            - [image_probe.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/image_probe.py)

        ![](https://vedo.embl.es/images/advanced/interpolateMeshArray.png)
        """
        if radius is None and not n:
            vedo.logger.error("in interpolate_data_from(): please set either radius or n")
            raise RuntimeError

        if on == "points":
            points = source.dataset
        elif on == "cells":
            # cell data must first be moved to the points for interpolation
            c2p = vtki.new("CellDataToPointData")
            c2p.SetInputData(source.dataset)
            c2p.Update()
            points = c2p.GetOutput()
        else:
            vedo.logger.error("in interpolate_data_from(), on must be on points or cells")
            raise RuntimeError()

        # locator speeds up the neighborhood searches performed by the kernel
        locator = vtki.new("PointLocator")
        locator.SetDataSet(points)
        locator.BuildLocator()

        if kernel.lower() == "shepard":
            kern = vtki.new("ShepardKernel")
            kern.SetPowerParameter(2)
        elif kernel.lower() == "gaussian":
            kern = vtki.new("GaussianKernel")
            kern.SetSharpness(2)
        elif kernel.lower() == "linear":
            kern = vtki.new("LinearKernel")
        else:
            vedo.logger.error("available kernels are: [shepard, gaussian, linear]")
            raise RuntimeError()

        if n:
            # n closest points take precedence over the radius search
            kern.SetNumberOfPoints(n)
            kern.SetKernelFootprintToNClosest()
        else:
            kern.SetRadius(radius)
            kern.SetKernelFootprintToRadius()

        interpolator = vtki.new("PointInterpolator")
        interpolator.SetInputData(self.dataset)
        interpolator.SetSourceData(points)
        interpolator.SetKernel(kern)
        interpolator.SetLocator(locator)
        interpolator.PassFieldArraysOn()
        interpolator.SetNullPointsStrategy(null_strategy)
        interpolator.SetNullValue(null_value)
        interpolator.SetValidPointsMaskArrayName("ValidPointMask")
        for ex in exclude:
            interpolator.AddExcludedArray(ex)
        interpolator.Update()

        if on == "cells":
            # move the interpolated point data back onto the cells
            p2c = vtki.new("PointDataToCellData")
            p2c.SetInputData(interpolator.GetOutput())
            p2c.Update()
            cpoly = p2c.GetOutput()
        else:
            cpoly = interpolator.GetOutput()

        self._update(cpoly, reset_locators=False)

        self.pipeline = utils.OperationNode("interpolate_data_from", parents=[self, source])
        return self

    def add_ids(self) -> Self:
        """
        Generate point and cell ids arrays.

        Two new arrays are added to the mesh: `PointID` and `CellID`.
        """
        ids = vtki.new("IdFilter")
        ids.SetInputData(self.dataset)
        ids.PointIdsOn()
        ids.CellIdsOn()
        ids.FieldDataOff()
        ids.SetPointIdsArrayName("PointID")
        ids.SetCellIdsArrayName("CellID")
        ids.Update()
        self._update(ids.GetOutput(), reset_locators=False)
        self.pipeline = utils.OperationNode("add_ids", parents=[self])
        return self

    def gradient(self, input_array=None, on="points", fast=False) -> np.ndarray:
        """
        Compute and return the gradient of the active scalar field as a numpy array.

        Arguments:
            input_array : (str)
                array of the scalars to compute the gradient,
                if None the current active array is selected
            on : (str)
                compute either on 'points' or 'cells' data
            fast : (bool)
                if True, will use a less accurate algorithm
                that performs fewer derivative calculations (and is therefore faster).

        Examples:
            - [isolines.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/isolines.py)

        ![](https://user-images.githubusercontent.com/32848391/72433087-f00a8780-3798-11ea-9778-991f0abeca70.png)
        """
        gra = vtki.new("GradientFilter")
        if on.startswith("p"):
            varr = self.dataset.GetPointData()
            tp = vtki.vtkDataObject.FIELD_ASSOCIATION_POINTS
        elif on.startswith("c"):
            varr = self.dataset.GetCellData()
            tp = vtki.vtkDataObject.FIELD_ASSOCIATION_CELLS
        else:
            vedo.logger.error(f"in gradient: unknown option {on}")
            raise RuntimeError

        if input_array is None:
            # fall back to the currently active scalar array
            if varr.GetScalars():
                input_array = varr.GetScalars().GetName()
            else:
                vedo.logger.error(f"in gradient: no scalars found for {on}")
                raise RuntimeError

        gra.SetInputData(self.dataset)
        gra.SetInputScalars(tp, input_array)
        gra.SetResultArrayName("Gradient")
        gra.SetFasterApproximation(fast)
        gra.ComputeDivergenceOff()
        gra.ComputeVorticityOff()
        gra.ComputeGradientOn()
        gra.Update()
        if on.startswith("p"):
            gvecs = utils.vtk2numpy(gra.GetOutput().GetPointData().GetArray("Gradient"))
        else:
            gvecs = utils.vtk2numpy(gra.GetOutput().GetCellData().GetArray("Gradient"))
        return gvecs
    def divergence(self, array_name=None, on="points", fast=False) -> np.ndarray:
        """
        Compute and return the divergence of a vector field as a numpy array.

        Arguments:
            array_name : (str)
                name of the array of vectors to compute the divergence,
                if None the current active array is selected
            on : (str)
                compute either on 'points' or 'cells' data
            fast : (bool)
                if True, will use a less accurate algorithm
                that performs fewer derivative calculations (and is therefore faster).
        """
        div = vtki.new("GradientFilter")
        if on.startswith("p"):
            varr = self.dataset.GetPointData()
            tp = vtki.vtkDataObject.FIELD_ASSOCIATION_POINTS
        elif on.startswith("c"):
            varr = self.dataset.GetCellData()
            tp = vtki.vtkDataObject.FIELD_ASSOCIATION_CELLS
        else:
            vedo.logger.error(f"in divergence(): unknown option {on}")
            raise RuntimeError

        if array_name is None:
            # fall back to the currently active vector array
            if varr.GetVectors():
                array_name = varr.GetVectors().GetName()
            else:
                vedo.logger.error(f"in divergence(): no vectors found for {on}")
                raise RuntimeError

        div.SetInputData(self.dataset)
        div.SetInputScalars(tp, array_name)
        # only the divergence is requested from the gradient filter
        div.ComputeDivergenceOn()
        div.ComputeGradientOff()
        div.ComputeVorticityOff()
        div.SetDivergenceArrayName("Divergence")
        div.SetFasterApproximation(fast)
        div.Update()
        if on.startswith("p"):
            dvecs = utils.vtk2numpy(div.GetOutput().GetPointData().GetArray("Divergence"))
        else:
            dvecs = utils.vtk2numpy(div.GetOutput().GetCellData().GetArray("Divergence"))
        return dvecs

    def vorticity(self, array_name=None, on="points", fast=False) -> np.ndarray:
        """
        Compute and return the vorticity of a vector field as a numpy array.

        Arguments:
            array_name : (str)
                name of the array to compute the vorticity,
                if None the current active array is selected
            on : (str)
                compute either on 'points' or 'cells' data
            fast : (bool)
                if True, will use a less accurate algorithm
                that performs fewer derivative calculations (and is therefore faster).
        """
        vort = vtki.new("GradientFilter")
        if on.startswith("p"):
            varr = self.dataset.GetPointData()
            tp = vtki.vtkDataObject.FIELD_ASSOCIATION_POINTS
        elif on.startswith("c"):
            varr = self.dataset.GetCellData()
            tp = vtki.vtkDataObject.FIELD_ASSOCIATION_CELLS
        else:
            vedo.logger.error(f"in vorticity(): unknown option {on}")
            raise RuntimeError

        if array_name is None:
            # fall back to the currently active vector array
            if varr.GetVectors():
                array_name = varr.GetVectors().GetName()
            else:
                vedo.logger.error(f"in vorticity(): no vectors found for {on}")
                raise RuntimeError

        vort.SetInputData(self.dataset)
        vort.SetInputScalars(tp, array_name)
        # only the vorticity is requested from the gradient filter
        vort.ComputeDivergenceOff()
        vort.ComputeGradientOff()
        vort.ComputeVorticityOn()
        vort.SetVorticityArrayName("Vorticity")
        vort.SetFasterApproximation(fast)
        vort.Update()
        if on.startswith("p"):
            vvecs = utils.vtk2numpy(vort.GetOutput().GetPointData().GetArray("Vorticity"))
        else:
            vvecs = utils.vtk2numpy(vort.GetOutput().GetCellData().GetArray("Vorticity"))
        return vvecs

    def probe(self, source) -> Self:
        """
        Takes a data set and probes its scalars at the specified points in space.

        Note that a mask is also output with valid/invalid points which can be accessed
        with `mesh.pointdata['ValidPointMask']`.

        Check out also:
            `interpolate_data_from()`

        Examples:
            - [probe_points.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/probe_points.py)

        ![](https://vedo.embl.es/images/volumetric/probePoints.png)
        """
        probe_filter = vtki.new("ProbeFilter")
        probe_filter.SetSourceData(source.dataset)
        probe_filter.SetInputData(self.dataset)
        probe_filter.Update()
        self._update(probe_filter.GetOutput(), reset_locators=False)
        self.pipeline = utils.OperationNode("probe", parents=[self, source])
        # expose the VTK-generated validity mask under a friendlier name
        self.pointdata.rename("vtkValidPointMask", "ValidPointMask")
        return self
    def compute_cell_size(self) -> Self:
        """
        Add to this object a cell data array
        containing the area, volume and edge length
        of the cells (when applicable to the object type).

        Array names are: `Area`, `Volume`, `Length`.
        """
        csf = vtki.new("CellSizeFilter")
        csf.SetInputData(self.dataset)
        csf.SetComputeArea(1)
        csf.SetComputeVolume(1)
        csf.SetComputeLength(1)
        csf.SetComputeVertexCount(0)
        csf.SetAreaArrayName("Area")
        csf.SetVolumeArrayName("Volume")
        csf.SetLengthArrayName("Length")
        csf.Update()
        self._update(csf.GetOutput(), reset_locators=False)
        return self

    def generate_random_data(self) -> Self:
        """Fill a dataset with random attributes"""
        gen = vtki.new("RandomAttributeGenerator")
        gen.SetInputData(self.dataset)
        gen.GenerateAllDataOn()
        gen.SetDataTypeToFloat()
        gen.GeneratePointNormalsOff()
        gen.GeneratePointTensorsOn()
        gen.GenerateCellScalarsOn()
        gen.Update()
        self._update(gen.GetOutput(), reset_locators=False)
        self.pipeline = utils.OperationNode("generate_random_data", parents=[self])
        return self

    def integrate_data(self) -> dict:
        """
        Integrate point and cell data arrays while computing length,
        area or volume of the domain. It works for 1D, 2D or 3D cells.

        For volumetric datasets, this filter ignores all but 3D cells.
        It will not compute the volume contained in a closed surface.

        Returns a dictionary with keys: `pointdata`, `celldata`, `metadata`,
        which contain the integration result for the corresponding attributes.

        Examples:
            ```python
            from vedo import *
            surf = Sphere(res=100)
            surf.pointdata['scalars'] = np.ones(surf.npoints)
            data = surf.integrate_data()
            print(data['pointdata']['scalars'], "is equal to 4pi", 4*np.pi)
            ```

            ```python
            from vedo import *

            xcoords1 = np.arange(0, 2.2, 0.2)
            xcoords2 = sqrt(np.arange(0, 4.2, 0.2))

            ycoords = np.arange(0, 1.2, 0.2)

            surf1 = Grid(s=(xcoords1, ycoords)).rotate_y(-45).lw(2)
            surf2 = Grid(s=(xcoords2, ycoords)).rotate_y(-45).lw(2)

            surf1.pointdata['scalars'] = surf1.vertices[:,2]
            surf2.pointdata['scalars'] = surf2.vertices[:,2]

            data1 = surf1.integrate_data()
            data2 = surf2.integrate_data()

            print(data1['pointdata']['scalars'],
                "is equal to",
                data2['pointdata']['scalars'],
                "even if the grids are different!",
                "(= the volume under the surface)"
            )
            show(surf1, surf2, N=2, axes=1).close()
            ```
        """
        vinteg = vtki.new("IntegrateAttributes")
        vinteg.SetInputData(self.dataset)
        vinteg.Update()
        # wrap the output to reuse the numpy-conversion helpers
        ugrid = vedo.UnstructuredGrid(vinteg.GetOutput())
        data = dict(
            pointdata=ugrid.pointdata.todict(),
            celldata=ugrid.celldata.todict(),
            metadata=ugrid.metadata.todict(),
        )
        return data

    def write(self, filename, binary=True) -> None:
        """Write object to file."""
        out = vedo.file_io.write(self, filename, binary)
        out.pipeline = utils.OperationNode(
            "write", parents=[self], comment=filename[:15], shape="folder", c="#8a817c"
        )

    def tomesh(self, bounds=(), shrink=0) -> "vedo.Mesh":
        """
        Extract boundary geometry from dataset (or convert data to polygonal type).

        Two new arrays are added to the mesh: `OriginalCellIds` and `OriginalPointIds`
        to keep track of the original mesh elements.

        Arguments:
            bounds : (list)
                specify a sub-region to extract
            shrink : (float)
                shrink the cells to a fraction of their original size
        """
        geo = vtki.new("GeometryFilter")

        if shrink:
            sf = vtki.new("ShrinkFilter")
            sf.SetInputData(self.dataset)
            sf.SetShrinkFactor(shrink)
            sf.Update()
            geo.SetInputData(sf.GetOutput())
        else:
            geo.SetInputData(self.dataset)

        geo.SetPassThroughCellIds(1)
        geo.SetPassThroughPointIds(1)
        geo.SetOriginalCellIdsName("OriginalCellIds")
        geo.SetOriginalPointIdsName("OriginalPointIds")
        geo.SetNonlinearSubdivisionLevel(1)
        # geo.MergingOff() # crashes on StructuredGrids
        if bounds:
            geo.SetExtent(bounds)
            geo.ExtentClippingOn()
        geo.Update()
        msh = vedo.mesh.Mesh(geo.GetOutput())
        msh.pipeline = utils.OperationNode("tomesh", parents=[self], c="#9e2a2b")
        return msh
1380 Extract boundary geometry from dataset (or convert data to polygonal type). 1381 1382 Two new arrays are added to the mesh: `OriginalCellIds` and `OriginalPointIds` 1383 to keep track of the original mesh elements. 1384 1385 Arguments: 1386 bounds : (list) 1387 specify a sub-region to extract 1388 shrink : (float) 1389 shrink the cells to a fraction of their original size 1390 """ 1391 geo = vtki.new("GeometryFilter") 1392 1393 if shrink: 1394 sf = vtki.new("ShrinkFilter") 1395 sf.SetInputData(self.dataset) 1396 sf.SetShrinkFactor(shrink) 1397 sf.Update() 1398 geo.SetInputData(sf.GetOutput()) 1399 else: 1400 geo.SetInputData(self.dataset) 1401 1402 geo.SetPassThroughCellIds(1) 1403 geo.SetPassThroughPointIds(1) 1404 geo.SetOriginalCellIdsName("OriginalCellIds") 1405 geo.SetOriginalPointIdsName("OriginalPointIds") 1406 geo.SetNonlinearSubdivisionLevel(1) 1407 # geo.MergingOff() # crashes on StructuredGrids 1408 if bounds: 1409 geo.SetExtent(bounds) 1410 geo.ExtentClippingOn() 1411 geo.Update() 1412 msh = vedo.mesh.Mesh(geo.GetOutput()) 1413 msh.pipeline = utils.OperationNode("tomesh", parents=[self], c="#9e2a2b") 1414 return msh 1415 1416 def signed_distance(self, dims=(20, 20, 20), bounds=None, invert=False, max_radius=None) -> "vedo.Volume": 1417 """ 1418 Compute the `Volume` object whose voxels contains the signed distance from 1419 the object. The calling object must have "Normals" defined. 1420 1421 Arguments: 1422 bounds : (list, actor) 1423 bounding box sizes 1424 dims : (list) 1425 dimensions (nr. of voxels) of the output volume. 
1426 invert : (bool) 1427 flip the sign 1428 max_radius : (float) 1429 specify how far out to propagate distance calculation 1430 1431 Examples: 1432 - [distance2mesh.py](https://github.com/marcomusy/vedo/blob/master/examples/basic/distance2mesh.py) 1433 1434 ![](https://vedo.embl.es/images/basic/distance2mesh.png) 1435 """ 1436 if bounds is None: 1437 bounds = self.bounds() 1438 if max_radius is None: 1439 max_radius = self.diagonal_size() / 2 1440 dist = vtki.new("SignedDistance") 1441 dist.SetInputData(self.dataset) 1442 dist.SetRadius(max_radius) 1443 dist.SetBounds(bounds) 1444 dist.SetDimensions(dims) 1445 dist.Update() 1446 img = dist.GetOutput() 1447 if invert: 1448 mat = vtki.new("ImageMathematics") 1449 mat.SetInput1Data(img) 1450 mat.SetOperationToMultiplyByK() 1451 mat.SetConstantK(-1) 1452 mat.Update() 1453 img = mat.GetOutput() 1454 1455 vol = vedo.Volume(img) 1456 vol.name = "SignedDistanceVolume" 1457 vol.pipeline = utils.OperationNode( 1458 "signed_distance", 1459 parents=[self], 1460 comment=f"dims={tuple(vol.dimensions())}", 1461 c="#e9c46a:#0096c7", 1462 ) 1463 return vol 1464 1465 def unsigned_distance( 1466 self, dims=(25,25,25), bounds=(), max_radius=0, cap_value=0) -> "vedo.Volume": 1467 """ 1468 Compute the `Volume` object whose voxels contains the unsigned distance. 
1469 """ 1470 dist = vtki.new("UnsignedDistance") 1471 dist.SetInputData(self.dataset) 1472 dist.SetDimensions(dims) 1473 1474 if len(bounds) == 6: 1475 dist.SetBounds(bounds) 1476 # elif bounds == "auto": 1477 # dist.AdjustBoundsOn() 1478 else: 1479 dist.SetBounds(self.bounds()) 1480 if not max_radius: 1481 max_radius = self.diagonal_size() / 10 1482 dist.SetRadius(max_radius) 1483 1484 if self.point_locator: 1485 dist.SetLocator(self.point_locator) 1486 1487 if cap_value is not None: 1488 dist.CappingOn() 1489 dist.SetCapValue(cap_value) 1490 dist.SetOutputScalarTypeToFloat() 1491 dist.Update() 1492 vol = vedo.Volume(dist.GetOutput()) 1493 vol.name = "UnsignedDistanceVolume" 1494 vol.pipeline = utils.OperationNode( 1495 "unsigned_distance", parents=[self], c="#e9c46a:#0096c7") 1496 return vol 1497 1498 def smooth_data(self, 1499 niter=10, relaxation_factor=0.1, strategy=0, mask=None, 1500 exclude=("Normals", "TextureCoordinates"), 1501 ) -> Self: 1502 """ 1503 Smooth point attribute data using distance weighted Laplacian kernel. 1504 1505 The effect is to blur regions of high variation and emphasize low variation regions. 
    def compute_streamlines(
            self,
            seeds: Any,
            integrator="rk4",
            direction="forward",
            initial_step_size=None,
            max_propagation=None,
            max_steps=10000,
            step_length=0,
            surface_constrained=False,
            compute_vorticity=False,
        ) -> Union["vedo.Lines", None]:
        """
        Integrate a vector field to generate streamlines.

        Arguments:
            seeds : (Mesh, Points, list)
                starting points of the streamlines
            integrator : (str)
                type of integration method to be used:
                    - "rk2" (Runge-Kutta 2)
                    - "rk4" (Runge-Kutta 4)
                    - "rk45" (Runge-Kutta 45)
            direction : (str)
                direction of integration, either "forward", "backward" or "both"
            initial_step_size : (float)
                initial step size used for line integration
            max_propagation : (float)
                maximum length of a streamline expressed in absolute units
            max_steps : (int)
                maximum number of steps for a streamline
            step_length : (float)
                maximum length of a step expressed in absolute units
            surface_constrained : (bool)
                whether to stop integrating when the streamline leaves the surface
            compute_vorticity : (bool)
                whether to compute the vorticity at each streamline point
        """
        # characteristic size: mean extent of the bounding box
        b = self.dataset.GetBounds()
        size = (b[5]-b[4] + b[3]-b[2] + b[1]-b[0]) / 3
        if initial_step_size is None:
            initial_step_size = size / 1000.0

        if max_propagation is None:
            max_propagation = size * 2

        if utils.is_sequence(seeds):
            seeds = vedo.Points(seeds)

        sti = vtki.new("StreamTracer")
        sti.SetSourceData(seeds.dataset)
        if isinstance(self, vedo.RectilinearGrid):
            # the stream tracer does not accept rectilinear grids directly
            sti.SetInputData(vedo.UnstructuredGrid(self.dataset).dataset)
        else:
            sti.SetInputDataObject(self.dataset)

        sti.SetInitialIntegrationStep(initial_step_size)
        sti.SetComputeVorticity(compute_vorticity)
        sti.SetMaximumNumberOfSteps(max_steps)
        sti.SetMaximumPropagation(max_propagation)
        sti.SetSurfaceStreamlines(surface_constrained)
        if step_length:
            sti.SetMaximumIntegrationStep(step_length)

        if "for" in direction:
            sti.SetIntegrationDirectionToForward()
        elif "back" in direction:
            sti.SetIntegrationDirectionToBackward()
        elif "both" in direction:
            sti.SetIntegrationDirectionToBoth()
        else:
            vedo.logger.error(f"in compute_streamlines(), unknown direction {direction}")
            return None

        if integrator == "rk2":
            sti.SetIntegratorTypeToRungeKutta2()
        elif integrator == "rk4":
            sti.SetIntegratorTypeToRungeKutta4()
        elif integrator == "rk45":
            sti.SetIntegratorTypeToRungeKutta45()
        else:
            vedo.logger.error(f"in compute_streamlines(), unknown integrator {integrator}")
            return None

        sti.Update()

        stlines = vedo.shapes.Lines(sti.GetOutput(), lw=4)
        stlines.name = "StreamLines"
        self.pipeline = utils.OperationNode(
            "compute_streamlines", comment=f"{integrator}", parents=[self, seeds], c="#9e2a2b"
        )
        return stlines
###############################################################################
class PointAlgorithms(CommonAlgorithms):
    """Methods for point clouds."""

    def apply_transform(self, LT: Any, concatenate=True, deep_copy=True) -> Self:
        """
        Apply a linear or non-linear transformation to the mesh polygonal data.

        Arguments:
            LT : (LinearTransform, NonLinearTransform, matrix, vtk transform)
                the transformation to be applied
            concatenate : (bool)
                concatenate the transformation with the current existing one
            deep_copy : (bool)
                if False use a shallow copy of the transformed output points

        Example:
            ```python
            from vedo import Cube, show, settings
            settings.use_parallel_projection = True
            c1 = Cube().rotate_z(25).pos(2,1).mirror().alpha(0.5)
            T = c1.transform  # rotate by 25 degrees, place at (2,1)
            c2 = Cube().c('red4').wireframe().lw(10).lighting('off')
            c2.apply_transform(T)
            show(c1, c2, "The 2 cubes should overlap!", axes=1).close()
            ```

        ![](https://vedo.embl.es/images/feats/apply_transform.png)
        """
        if self.dataset.GetNumberOfPoints() == 0:
            return self

        if isinstance(LT, LinearTransform):
            LT_is_linear = True
            tr = LT.T
            if LT.is_identity():
                return self

        elif isinstance(LT, (vtki.vtkMatrix4x4, vtki.vtkLinearTransform)) or utils.is_sequence(LT):
            LT_is_linear = True
            LT = LinearTransform(LT)
            tr = LT.T
            if LT.is_identity():
                return self

        elif isinstance(LT, NonLinearTransform):
            LT_is_linear = False
            tr = LT.T
            self.transform = LT  # reset

        elif isinstance(LT, vtki.vtkThinPlateSplineTransform):
            LT_is_linear = False
            tr = LT
            self.transform = NonLinearTransform(LT)  # reset

        else:
            vedo.logger.error(f"apply_transform(), unknown input type:\n{LT}")
            return self

        ################
        if LT_is_linear:
            if concatenate:
                try:
                    # self.transform might still not be linear
                    self.transform.concatenate(LT)
                except AttributeError:
                    # in that case reset it
                    self.transform = LinearTransform()

        ################
        if isinstance(self.dataset, vtki.vtkPolyData):
            tp = vtki.new("TransformPolyDataFilter")
        elif isinstance(self.dataset, vtki.vtkUnstructuredGrid):
            tp = vtki.new("TransformFilter")
            tp.TransformAllInputVectorsOn()
        # elif isinstance(self.dataset, vtki.vtkImageData):
        #     tp = vtki.new("ImageReslice")
        #     tp.SetInterpolationModeToCubic()
        #     tp.SetResliceTransform(tr)
        else:
            vedo.logger.error(f"apply_transform(), unknown input type: {[self.dataset]}")
            return self

        tp.SetTransform(tr)
        tp.SetInputData(self.dataset)
        tp.Update()
        out = tp.GetOutput()

        if deep_copy:
            self.dataset.DeepCopy(out)
        else:
            self.dataset.ShallowCopy(out)

        # reset the locators
        self.point_locator = None
        self.cell_locator = None
        self.line_locator = None
        return self

    def apply_transform_from_actor(self) -> LinearTransform:
        """
        Apply the current transformation of the actor to the data.
        Useful when manually moving an actor (eg. when pressing "a").
        Returns the `LinearTransform` object.

        Note that this method is automatically called when the window is closed,
        or the interactor style is changed.
        """
        M = self.actor.GetMatrix()
        self.apply_transform(M)
        # reset the actor matrix to the identity, the data now carries the transform
        iden = vtki.vtkMatrix4x4()
        self.actor.PokeMatrix(iden)
        return LinearTransform(M)

    def pos(self, x=None, y=None, z=None) -> Self:
        """Set/Get object position."""
        if x is None:  # get functionality
            return self.transform.position

        if z is None and y is None:  # assume x is of the form (x,y,z)
            if len(x) == 3:
                x, y, z = x
            else:
                x, y = x
                z = 0
        elif z is None:  # assume x,y is of the form x, y
            z = 0

        q = self.transform.position
        delta = [x, y, z] - q
        if delta[0] == delta[1] == delta[2] == 0:
            return self
        LT = LinearTransform().translate(delta)
        return self.apply_transform(LT)

    def shift(self, dx=0, dy=0, dz=0) -> Self:
        """Add a vector to the current object position."""
        if utils.is_sequence(dx):
            dx, dy, dz = utils.make3d(dx)
        if dx == dy == dz == 0:
            return self
        LT = LinearTransform().translate([dx, dy, dz])
        return self.apply_transform(LT)

    def x(self, val=None) -> Self:
        """Set/Get object position along x axis."""
        p = self.transform.position
        if val is None:
            return p[0]
        self.pos(val, p[1], p[2])
        return self

    def y(self, val=None)-> Self:
        """Set/Get object position along y axis."""
        p = self.transform.position
        if val is None:
            return p[1]
        self.pos(p[0], val, p[2])
        return self

    def z(self, val=None) -> Self:
        """Set/Get object position along z axis."""
        p = self.transform.position
        if val is None:
            return p[2]
        self.pos(p[0], p[1], val)
        return self

    def rotate(self, angle: float, axis=(1, 0, 0), point=(0, 0, 0), rad=False) -> Self:
        """
        Rotate around an arbitrary `axis` passing through `point`.

        Example:
            ```python
            from vedo import *
            c1 = Cube()
            c2 = c1.clone().c('violet').alpha(0.5) # copy of c1
            v = vector(0.2,1,0)
            p = vector(1,0,0)  # axis passes through this point
            c2.rotate(90, axis=v, point=p)
            l = Line(-v+p, v+p).lw(3).c('red')
            show(c1, l, c2, axes=1).close()
            ```

        ![](https://vedo.embl.es/images/feats/rotate_axis.png)
        """
        LT = LinearTransform()
        LT.rotate(angle, axis, point, rad)
        return self.apply_transform(LT)

    def rotate_x(self, angle: float, rad=False, around=None) -> Self:
        """
        Rotate around x-axis. If angle is in radians set `rad=True`.

        Use `around` to define a pivoting point.
        """
        if angle == 0:
            return self
        LT = LinearTransform().rotate_x(angle, rad, around)
        return self.apply_transform(LT)

    def rotate_y(self, angle: float, rad=False, around=None) -> Self:
        """
        Rotate around y-axis. If angle is in radians set `rad=True`.

        Use `around` to define a pivoting point.
        """
        if angle == 0:
            return self
        LT = LinearTransform().rotate_y(angle, rad, around)
        return self.apply_transform(LT)

    def rotate_z(self, angle: float, rad=False, around=None) -> Self:
        """
        Rotate around z-axis. If angle is in radians set `rad=True`.

        Use `around` to define a pivoting point.
        """
        if angle == 0:
            return self
        LT = LinearTransform().rotate_z(angle, rad, around)
        return self.apply_transform(LT)

    def reorient(self, initaxis, newaxis, rotation=0, rad=False, xyplane=False) -> Self:
        """
        Reorient the object to point to a new direction from an initial one.
        If `initaxis` is None, the object will be assumed in its "default" orientation.
        If `xyplane` is True, the object will be rotated to lie on the xy plane.

        Use `rotation` to first rotate the object around its `initaxis`.
        """
        q = self.transform.position
        LT = LinearTransform()
        LT.reorient(initaxis, newaxis, q, rotation, rad, xyplane)
        return self.apply_transform(LT)

    def scale(self, s=None, reset=False, origin=True) -> Union[Self, np.ndarray]:
        """
        Set/get object's scaling factor.

        Arguments:
            s : (list, float)
                scaling factor(s).
            reset : (bool)
                if True previous scaling factors are ignored.
            origin : (bool)
                if True scaling is applied with respect to object's position,
                otherwise is applied respect to (0,0,0).

        Note:
            use `s=(sx,sy,sz)` to scale differently in the three coordinates.
        """
        if s is None:
            # get functionality: return the current scaling factors
            return np.array(self.transform.T.GetScale())

        if not utils.is_sequence(s):
            s = [s, s, s]

        LT = LinearTransform()
        if reset:
            # undo the accumulated scaling before applying the new one
            old_s = np.array(self.transform.T.GetScale())
            LT.scale(s / old_s)
        else:
            if origin is True:
                LT.scale(s, origin=self.transform.position)
            elif origin is False:
                LT.scale(s, origin=False)
            else:
                LT.scale(s, origin=origin)

        return self.apply_transform(LT)
1895 """ 1896 if s is None: 1897 return np.array(self.transform.T.GetScale()) 1898 1899 if not utils.is_sequence(s): 1900 s = [s, s, s] 1901 1902 LT = LinearTransform() 1903 if reset: 1904 old_s = np.array(self.transform.T.GetScale()) 1905 LT.scale(s / old_s) 1906 else: 1907 if origin is True: 1908 LT.scale(s, origin=self.transform.position) 1909 elif origin is False: 1910 LT.scale(s, origin=False) 1911 else: 1912 LT.scale(s, origin=origin) 1913 1914 return self.apply_transform(LT) 1915 1916 1917############################################################################### 1918class VolumeAlgorithms(CommonAlgorithms): 1919 """Methods for Volume objects.""" 1920 1921 def bounds(self) -> np.ndarray: 1922 """ 1923 Get the object bounds. 1924 Returns a list in format `[xmin,xmax, ymin,ymax, zmin,zmax]`. 1925 """ 1926 # OVERRIDE CommonAlgorithms.bounds() which is too slow 1927 return np.array(self.dataset.GetBounds()) 1928 1929 def isosurface(self, value=None, flying_edges=False) -> "vedo.mesh.Mesh": 1930 """ 1931 Return an `Mesh` isosurface extracted from the `Volume` object. 1932 1933 Set `value` as single float or list of values to draw the isosurface(s). 1934 Use flying_edges for faster results (but sometimes can interfere with `smooth()`). 
1935 1936 Examples: 1937 - [isosurfaces1.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/isosurfaces1.py) 1938 1939 ![](https://vedo.embl.es/images/volumetric/isosurfaces.png) 1940 """ 1941 scrange = self.dataset.GetScalarRange() 1942 1943 if flying_edges: 1944 cf = vtki.new("FlyingEdges3D") 1945 cf.InterpolateAttributesOn() 1946 else: 1947 cf = vtki.new("ContourFilter") 1948 cf.UseScalarTreeOn() 1949 1950 cf.SetInputData(self.dataset) 1951 cf.ComputeNormalsOn() 1952 1953 if utils.is_sequence(value): 1954 cf.SetNumberOfContours(len(value)) 1955 for i, t in enumerate(value): 1956 cf.SetValue(i, t) 1957 else: 1958 if value is None: 1959 value = (2 * scrange[0] + scrange[1]) / 3.0 1960 # print("automatic isosurface value =", value) 1961 cf.SetValue(0, value) 1962 1963 cf.Update() 1964 poly = cf.GetOutput() 1965 1966 out = vedo.mesh.Mesh(poly, c=None).phong() 1967 out.mapper.SetScalarRange(scrange[0], scrange[1]) 1968 1969 out.pipeline = utils.OperationNode( 1970 "isosurface", 1971 parents=[self], 1972 comment=f"#pts {out.dataset.GetNumberOfPoints()}", 1973 c="#4cc9f0:#e9c46a", 1974 ) 1975 return out 1976 1977 def isosurface_discrete(self, value=None, nsmooth=15) -> "vedo.mesh.Mesh": 1978 """ 1979 Create boundary/isocontour surfaces from a label map (e.g., a segmented image) using a threaded, 1980 3D version of the multiple objects/labels Surface Nets algorithm. 1981 The input is a 3D image (i.e., volume) where each voxel is labeled 1982 (integer labels are preferred to real values), and the output data is a polygonal mesh separating 1983 labeled regions / objects. 1984 (Note that on output each region [corresponding to a different segmented object] will share 1985 points/edges on a common boundary, i.e., two neighboring objects will share the boundary that separates them). 1986 1987 Arguments: 1988 value : (float, list) 1989 single value or list of values to draw the isosurface(s). 
1990 nsmooth : (int) 1991 number of iterations of smoothing (0 means no smoothing). 1992 1993 Examples: 1994 - [isosurfaces2.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/isosurfaces2.py) 1995 """ 1996 if not utils.is_sequence(value): 1997 value = [value] 1998 1999 snets = vtki.new("SurfaceNets3D") 2000 snets.SetInputData(self.dataset) 2001 2002 if nsmooth: 2003 snets.SmoothingOn() 2004 snets.AutomaticSmoothingConstraintsOn() 2005 snets.GetSmoother().SetNumberOfIterations(nsmooth) 2006 # snets.GetSmoother().SetRelaxationFactor(relaxation_factor) 2007 # snets.GetSmoother().SetConstraintDistance(constraint_distance) 2008 else: 2009 snets.SmoothingOff() 2010 2011 for i, val in enumerate(value): 2012 snets.SetValue(i, val) 2013 snets.Update() 2014 snets.SetOutputMeshTypeToTriangles() 2015 snets.SetOutputStyleToBoundary() 2016 snets.Update() 2017 2018 out = vedo.mesh.Mesh(snets.GetOutput()) 2019 out.pipeline = utils.OperationNode( 2020 "isosurface_discrete", 2021 parents=[self], 2022 comment=f"#pts {out.dataset.GetNumberOfPoints()}", 2023 c="#4cc9f0:#e9c46a", 2024 ) 2025 return out 2026 2027 2028 def legosurface( 2029 self, 2030 vmin=None, 2031 vmax=None, 2032 invert=False, 2033 boundary=False, 2034 array_name="input_scalars", 2035 ) -> "vedo.mesh.Mesh": 2036 """ 2037 Represent an object - typically a `Volume` - as lego blocks (voxels). 2038 By default colors correspond to the volume's scalar. 2039 Returns an `Mesh` object. 2040 2041 Arguments: 2042 vmin : (float) 2043 the lower threshold, voxels below this value are not shown. 2044 vmax : (float) 2045 the upper threshold, voxels above this value are not shown. 
2046 boundary : (bool) 2047 controls whether to include cells that are partially inside 2048 array_name : (int, str) 2049 name or index of the scalar array to be considered 2050 2051 Examples: 2052 - [legosurface.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/legosurface.py) 2053 2054 ![](https://vedo.embl.es/images/volumetric/56820682-da40e500-684c-11e9-8ea3-91cbcba24b3a.png) 2055 """ 2056 imp_dataset = vtki.new("ImplicitDataSet") 2057 imp_dataset.SetDataSet(self.dataset) 2058 window = vtki.new("ImplicitWindowFunction") 2059 window.SetImplicitFunction(imp_dataset) 2060 2061 srng = list(self.dataset.GetScalarRange()) 2062 if vmin is not None: 2063 srng[0] = vmin 2064 if vmax is not None: 2065 srng[1] = vmax 2066 tol = 0.00001 * (srng[1] - srng[0]) 2067 srng[0] -= tol 2068 srng[1] += tol 2069 window.SetWindowRange(srng) 2070 2071 extract = vtki.new("ExtractGeometry") 2072 extract.SetInputData(self.dataset) 2073 extract.SetImplicitFunction(window) 2074 extract.SetExtractInside(invert) 2075 extract.SetExtractBoundaryCells(boundary) 2076 extract.Update() 2077 2078 gf = vtki.new("GeometryFilter") 2079 gf.SetInputData(extract.GetOutput()) 2080 gf.Update() 2081 2082 m = vedo.mesh.Mesh(gf.GetOutput()).lw(0.1).flat() 2083 m.map_points_to_cells() 2084 m.celldata.select(array_name) 2085 2086 m.pipeline = utils.OperationNode( 2087 "legosurface", 2088 parents=[self], 2089 comment=f"array: {array_name}", 2090 c="#4cc9f0:#e9c46a", 2091 ) 2092 return m 2093 2094 def tomesh(self, fill=True, shrink=1.0) -> "vedo.mesh.Mesh": 2095 """ 2096 Build a polygonal Mesh from the current object. 2097 2098 If `fill=True`, the interior faces of all the cells are created. 2099 (setting a `shrink` value slightly smaller than the default 1.0 2100 can avoid flickering due to internal adjacent faces). 2101 2102 If `fill=False`, only the boundary faces will be generated. 
2103 """ 2104 gf = vtki.new("GeometryFilter") 2105 if fill: 2106 sf = vtki.new("ShrinkFilter") 2107 sf.SetInputData(self.dataset) 2108 sf.SetShrinkFactor(shrink) 2109 sf.Update() 2110 gf.SetInputData(sf.GetOutput()) 2111 gf.Update() 2112 poly = gf.GetOutput() 2113 if shrink == 1.0: 2114 clean_poly = vtki.new("CleanPolyData") 2115 clean_poly.PointMergingOn() 2116 clean_poly.ConvertLinesToPointsOn() 2117 clean_poly.ConvertPolysToLinesOn() 2118 clean_poly.ConvertStripsToPolysOn() 2119 clean_poly.SetInputData(poly) 2120 clean_poly.Update() 2121 poly = clean_poly.GetOutput() 2122 else: 2123 gf.SetInputData(self.dataset) 2124 gf.Update() 2125 poly = gf.GetOutput() 2126 2127 msh = vedo.mesh.Mesh(poly).flat() 2128 msh.scalarbar = self.scalarbar 2129 lut = utils.ctf2lut(self) 2130 if lut: 2131 msh.mapper.SetLookupTable(lut) 2132 2133 msh.pipeline = utils.OperationNode( 2134 "tomesh", parents=[self], comment=f"fill={fill}", c="#9e2a2b:#e9c46a" 2135 ) 2136 return msh
class DataArrayHelper:
    """
    Internal use only.
    Helper class to manage data associated to either
    points (or vertices) and cells (or faces).

    `association` selects the target attribute set:
    0 -> point data, 1 -> cell data, 2 -> field (meta) data.
    """

    def __init__(self, obj, association):
        # obj: the vedo object owning `dataset` (and usually a `mapper`)
        # association: 0=points, 1=cells, 2=field data
        self.obj = obj
        self.association = association

    def __getitem__(self, key):
        # Return the array named (or indexed by) `key` as a numpy array,
        # a list of strings for vtkStringArray metadata, or None if absent.

        if self.association == 0:
            data = self.obj.dataset.GetPointData()

        elif self.association == 1:
            data = self.obj.dataset.GetCellData()

        elif self.association == 2:
            data = self.obj.dataset.GetFieldData()

            # field data may hold string arrays, which vtk2numpy cannot convert
            varr = data.GetAbstractArray(key)
            if isinstance(varr, vtki.vtkStringArray):
                if isinstance(key, int):
                    key = data.GetArrayName(key)
                n = varr.GetNumberOfValues()
                narr = [varr.GetValue(i) for i in range(n)]
                return narr
                ###########

        else:
            raise RuntimeError()

        if isinstance(key, int):
            key = data.GetArrayName(key)

        arr = data.GetArray(key)
        if not arr:
            return None
        return utils.vtk2numpy(arr)

    def __setitem__(self, key, input_array):
        # Store `input_array` under name `key`; for point/cell data the length
        # must match the number of points/cells, and the new array is made active.

        if self.association == 0:
            data = self.obj.dataset.GetPointData()
            n = self.obj.dataset.GetNumberOfPoints()
            self.obj.mapper.SetScalarModeToUsePointData()

        elif self.association == 1:
            data = self.obj.dataset.GetCellData()
            n = self.obj.dataset.GetNumberOfCells()
            self.obj.mapper.SetScalarModeToUseCellData()

        elif self.association == 2:
            # metadata (field data): no length constraint, strings allowed
            data = self.obj.dataset.GetFieldData()
            if not utils.is_sequence(input_array):
                input_array = [input_array]

            if isinstance(input_array[0], str):
                varr = vtki.vtkStringArray()
                varr.SetName(key)
                varr.SetNumberOfComponents(1)
                varr.SetNumberOfTuples(len(input_array))
                for i, iarr in enumerate(input_array):
                    if isinstance(iarr, np.ndarray):
                        iarr = iarr.tolist()  # better format
                        # Note: a string k can be converted to numpy with
                        # import json; k = np.array(json.loads(k))
                    varr.InsertValue(i, str(iarr))
            else:
                try:
                    varr = utils.numpy2vtk(input_array, name=key)
                except TypeError as e:
                    vedo.logger.error(
                        f"cannot create metadata with input object:\n"
                        f"{input_array}"
                        f"\n\nAllowed content examples are:\n"
                        f"- flat list of strings ['a','b', 1, [1,2,3], ...]"
                        f" (first item must be a string in this case)\n"
                        f" hint: use k = np.array(json.loads(k)) to convert strings\n"
                        f"- numpy arrays of any shape"
                    )
                    raise e

            data.AddArray(varr)
            return ############

        else:
            raise RuntimeError()

        if len(input_array) != n:
            vedo.logger.error(
                f"Error in point/cell data: length of input {len(input_array)}"
                f" != {n} nr. of elements"
            )
            raise RuntimeError()

        input_array = np.asarray(input_array)
        varr = utils.numpy2vtk(input_array, name=key)
        data.AddArray(varr)

        if len(input_array.shape) == 1:  # scalars
            data.SetActiveScalars(key)
            try:  # could be a volume mapper
                self.obj.mapper.SetScalarRange(data.GetScalars().GetRange())
            except AttributeError:
                pass
        elif len(input_array.shape) == 2 and input_array.shape[1] == 3:  # vectors
            if key.lower() == "normals":
                data.SetActiveNormals(key)
            else:
                data.SetActiveVectors(key)

    def keys(self) -> List[str]:
        """Return the list of available data array names"""
        if self.association == 0:
            data = self.obj.dataset.GetPointData()
        elif self.association == 1:
            data = self.obj.dataset.GetCellData()
        elif self.association == 2:
            data = self.obj.dataset.GetFieldData()
        arrnames = []
        for i in range(data.GetNumberOfArrays()):
            name = ""
            if self.association == 2:
                # field data can hold non-numeric (abstract) arrays
                name = data.GetAbstractArray(i).GetName()
            else:
                iarr = data.GetArray(i)
                if iarr:
                    name = iarr.GetName()
            if name:
                arrnames.append(name)
        return arrnames

    def items(self) -> List:
        """Return the list of available data array `(names, values)`."""
        if self.association == 0:
            data = self.obj.dataset.GetPointData()
        elif self.association == 1:
            data = self.obj.dataset.GetCellData()
        elif self.association == 2:
            data = self.obj.dataset.GetFieldData()
        arrnames = []
        for i in range(data.GetNumberOfArrays()):
            if self.association == 2:
                name = data.GetAbstractArray(i).GetName()
            else:
                name = data.GetArray(i).GetName()
            if name:
                # values are fetched through __getitem__
                arrnames.append((name, self[name]))
        return arrnames

    def todict(self) -> dict:
        """Return a dictionary of the available data arrays."""
        return dict(self.items())

    def rename(self, oldname: str, newname: str) -> None:
        """Rename an array"""
        if self.association == 0:
            varr = self.obj.dataset.GetPointData().GetArray(oldname)
        elif self.association == 1:
            varr = self.obj.dataset.GetCellData().GetArray(oldname)
        elif self.association == 2:
            varr = self.obj.dataset.GetFieldData().GetAbstractArray(oldname)
        if varr:
            varr.SetName(newname)
        else:
            vedo.logger.warning(
                f"Cannot rename non existing array {oldname} to {newname}"
            )

    def remove(self, key: Union[int, str]) -> None:
        """Remove a data array by name or number"""
        if self.association == 0:
            self.obj.dataset.GetPointData().RemoveArray(key)
        elif self.association == 1:
            self.obj.dataset.GetCellData().RemoveArray(key)
        elif self.association == 2:
            self.obj.dataset.GetFieldData().RemoveArray(key)

    def clear(self) -> None:
        """Remove all data associated to this object"""
        if self.association == 0:
            data = self.obj.dataset.GetPointData()
        elif self.association == 1:
            data = self.obj.dataset.GetCellData()
        elif self.association == 2:
            data = self.obj.dataset.GetFieldData()
        # NOTE(review): removing while iterating over the original count shifts
        # the remaining array indices — with 2+ arrays some may be skipped or
        # GetArray(i) may return None; consider collecting names first.
        for i in range(data.GetNumberOfArrays()):
            if self.association == 2:
                name = data.GetAbstractArray(i).GetName()
            else:
                name = data.GetArray(i).GetName()
            data.RemoveArray(name)

    def select(self, key: Union[int, str]) -> Any:
        """Select one specific array by its name to make it the `active` one."""
        # Default (ColorModeToDefault): unsigned char scalars are treated as colors,
        # and NOT mapped through the lookup table, while everything else is.
        # ColorModeToDirectScalar extends ColorModeToDefault such that all integer
        # types are treated as colors with values in the range 0-255
        # and floating types are treated as colors with values in the range 0.0-1.0.
        # Setting ColorModeToMapScalars means that all scalar data will be mapped
        # through the lookup table.
        # (Note that for multi-component scalars, the particular component
        # to use for mapping can be specified using the SelectColorArray() method.)
        if self.association == 0:
            data = self.obj.dataset.GetPointData()
            self.obj.mapper.SetScalarModeToUsePointData()
        else:
            data = self.obj.dataset.GetCellData()
            self.obj.mapper.SetScalarModeToUseCellData()

        if isinstance(key, int):
            key = data.GetArrayName(key)

        arr = data.GetArray(key)
        if not arr:
            return self.obj

        nc = arr.GetNumberOfComponents()
        # print("GetNumberOfComponents", nc)
        if nc == 1:
            data.SetActiveScalars(key)
        elif nc == 2:
            # 2 components are interpreted as texture coordinates
            data.SetTCoords(arr)
        elif nc in (3, 4):
            if "rgb" in key.lower():  # type: ignore
                data.SetActiveScalars(key)
                try:
                    # could be a volume mapper
                    self.obj.mapper.SetColorModeToDirectScalars()
                    data.SetActiveVectors(None)  # need this to fix bug in #1066
                    # print("SetColorModeToDirectScalars for", key)
                except AttributeError:
                    pass
            else:
                data.SetActiveVectors(key)
        elif nc == 9:
            data.SetActiveTensors(key)
        else:
            vedo.logger.error(f"Cannot select array {key} with {nc} components")
            return self.obj

        try:
            # could be a volume mapper
            self.obj.mapper.SetArrayName(key)
            self.obj.mapper.ScalarVisibilityOn()
        except AttributeError:
            pass

        return self.obj

    def select_texture_coords(self, key: Union[int,str]) -> Any:
        """Select one specific array to be used as texture coordinates."""
        if self.association == 0:
            data = self.obj.dataset.GetPointData()
        else:
            vedo.logger.warning("texture coordinates are only available for point data")
            return

        if isinstance(key, int):
            key = data.GetArrayName(key)
        data.SetTCoords(data.GetArray(key))
        return self.obj

    def select_normals(self, key: Union[int,str]) -> Any:
        """Select one specific normal array by its name to make it the "active" one."""
        if self.association == 0:
            data = self.obj.dataset.GetPointData()
            self.obj.mapper.SetScalarModeToUsePointData()
        else:
            data = self.obj.dataset.GetCellData()
            self.obj.mapper.SetScalarModeToUseCellData()

        if isinstance(key, int):
            key = data.GetArrayName(key)
        data.SetActiveNormals(key)
        return self.obj

    def print(self, **kwargs) -> None:
        """Print the array names available to terminal"""
        colors.printc(self.keys(), **kwargs)

    def __repr__(self) -> str:
        """Representation"""

        def _get_str(pd, header):
            # Build an ANSI-styled summary for one attribute set (point/cell data).
            out = f"\x1b[2m\x1b[1m\x1b[7m{header}"
            if pd.GetNumberOfArrays():
                if self.obj.name:
                    out += f" in {self.obj.name}"
                out += f" contains {pd.GetNumberOfArrays()} array(s)\x1b[0m"
                for i in range(pd.GetNumberOfArrays()):
                    varr = pd.GetArray(i)
                    out += f"\n\x1b[1m\x1b[4mArray name : {varr.GetName()}\x1b[0m"
                    out += "\nindex".ljust(15) + f": {i}"
                    t = varr.GetDataType()
                    if t in vtki.array_types:
                        out += "\ntype".ljust(15)
                        out += f": {vtki.array_types[t]}"
                    shape = (varr.GetNumberOfTuples(), varr.GetNumberOfComponents())
                    out += "\nshape".ljust(15) + f": {shape}"
                    out += "\nrange".ljust(15) + f": {np.array(varr.GetRange())}"
                    out += "\nmax id".ljust(15) + f": {varr.GetMaxId()}"
                    out += "\nlook up table".ljust(15) + f": {bool(varr.GetLookupTable())}"
                    out += "\nin-memory size".ljust(15) + f": {varr.GetActualMemorySize()} KB"
            else:
                out += " is empty.\x1b[0m"
            return out

        if self.association == 0:
            out = _get_str(self.obj.dataset.GetPointData(), "Point Data")
        elif self.association == 1:
            out = _get_str(self.obj.dataset.GetCellData(), "Cell Data")
        elif self.association == 2:
            pd = self.obj.dataset.GetFieldData()
            if pd.GetNumberOfArrays():
                out = "\x1b[2m\x1b[1m\x1b[7mMeta Data"
                if self.obj.name:
                    out += f" in {self.obj.name}"
                out += f" contains {pd.GetNumberOfArrays()} entries\x1b[0m"
                for i in range(pd.GetNumberOfArrays()):
                    varr = pd.GetAbstractArray(i)
                    out += f"\n\x1b[1m\x1b[4mEntry name : {varr.GetName()}\x1b[0m"
                    out += "\nindex".ljust(15) + f": {i}"
                    shape = (varr.GetNumberOfTuples(), varr.GetNumberOfComponents())
                    out += "\nshape".ljust(15) + f": {shape}"

        return out
156 def keys(self) -> List[str]: 157 """Return the list of available data array names""" 158 if self.association == 0: 159 data = self.obj.dataset.GetPointData() 160 elif self.association == 1: 161 data = self.obj.dataset.GetCellData() 162 elif self.association == 2: 163 data = self.obj.dataset.GetFieldData() 164 arrnames = [] 165 for i in range(data.GetNumberOfArrays()): 166 name = "" 167 if self.association == 2: 168 name = data.GetAbstractArray(i).GetName() 169 else: 170 iarr = data.GetArray(i) 171 if iarr: 172 name = iarr.GetName() 173 if name: 174 arrnames.append(name) 175 return arrnames
Return the list of available data array names
177 def items(self) -> List: 178 """Return the list of available data array `(names, values)`.""" 179 if self.association == 0: 180 data = self.obj.dataset.GetPointData() 181 elif self.association == 1: 182 data = self.obj.dataset.GetCellData() 183 elif self.association == 2: 184 data = self.obj.dataset.GetFieldData() 185 arrnames = [] 186 for i in range(data.GetNumberOfArrays()): 187 if self.association == 2: 188 name = data.GetAbstractArray(i).GetName() 189 else: 190 name = data.GetArray(i).GetName() 191 if name: 192 arrnames.append((name, self[name])) 193 return arrnames
Return the list of available data array `(name, value)` pairs.
195 def todict(self) -> dict: 196 """Return a dictionary of the available data arrays.""" 197 return dict(self.items())
Return a dictionary of the available data arrays.
199 def rename(self, oldname: str, newname: str) -> None: 200 """Rename an array""" 201 if self.association == 0: 202 varr = self.obj.dataset.GetPointData().GetArray(oldname) 203 elif self.association == 1: 204 varr = self.obj.dataset.GetCellData().GetArray(oldname) 205 elif self.association == 2: 206 varr = self.obj.dataset.GetFieldData().GetAbstractArray(oldname) 207 if varr: 208 varr.SetName(newname) 209 else: 210 vedo.logger.warning( 211 f"Cannot rename non existing array {oldname} to {newname}" 212 )
Rename an array
214 def remove(self, key: Union[int, str]) -> None: 215 """Remove a data array by name or number""" 216 if self.association == 0: 217 self.obj.dataset.GetPointData().RemoveArray(key) 218 elif self.association == 1: 219 self.obj.dataset.GetCellData().RemoveArray(key) 220 elif self.association == 2: 221 self.obj.dataset.GetFieldData().RemoveArray(key)
Remove a data array by name or number
223 def clear(self) -> None: 224 """Remove all data associated to this object""" 225 if self.association == 0: 226 data = self.obj.dataset.GetPointData() 227 elif self.association == 1: 228 data = self.obj.dataset.GetCellData() 229 elif self.association == 2: 230 data = self.obj.dataset.GetFieldData() 231 for i in range(data.GetNumberOfArrays()): 232 if self.association == 2: 233 name = data.GetAbstractArray(i).GetName() 234 else: 235 name = data.GetArray(i).GetName() 236 data.RemoveArray(name)
Remove all data associated to this object
238 def select(self, key: Union[int, str]) -> Any: 239 """Select one specific array by its name to make it the `active` one.""" 240 # Default (ColorModeToDefault): unsigned char scalars are treated as colors, 241 # and NOT mapped through the lookup table, while everything else is. 242 # ColorModeToDirectScalar extends ColorModeToDefault such that all integer 243 # types are treated as colors with values in the range 0-255 244 # and floating types are treated as colors with values in the range 0.0-1.0. 245 # Setting ColorModeToMapScalars means that all scalar data will be mapped 246 # through the lookup table. 247 # (Note that for multi-component scalars, the particular component 248 # to use for mapping can be specified using the SelectColorArray() method.) 249 if self.association == 0: 250 data = self.obj.dataset.GetPointData() 251 self.obj.mapper.SetScalarModeToUsePointData() 252 else: 253 data = self.obj.dataset.GetCellData() 254 self.obj.mapper.SetScalarModeToUseCellData() 255 256 if isinstance(key, int): 257 key = data.GetArrayName(key) 258 259 arr = data.GetArray(key) 260 if not arr: 261 return self.obj 262 263 nc = arr.GetNumberOfComponents() 264 # print("GetNumberOfComponents", nc) 265 if nc == 1: 266 data.SetActiveScalars(key) 267 elif nc == 2: 268 data.SetTCoords(arr) 269 elif nc in (3, 4): 270 if "rgb" in key.lower(): # type: ignore 271 data.SetActiveScalars(key) 272 try: 273 # could be a volume mapper 274 self.obj.mapper.SetColorModeToDirectScalars() 275 data.SetActiveVectors(None) # need this to fix bug in #1066 276 # print("SetColorModeToDirectScalars for", key) 277 except AttributeError: 278 pass 279 else: 280 data.SetActiveVectors(key) 281 elif nc == 9: 282 data.SetActiveTensors(key) 283 else: 284 vedo.logger.error(f"Cannot select array {key} with {nc} components") 285 return self.obj 286 287 try: 288 # could be a volume mapper 289 self.obj.mapper.SetArrayName(key) 290 self.obj.mapper.ScalarVisibilityOn() 291 except AttributeError: 292 pass 293 294 
return self.obj
Select one specific array by its name to make it the active one.
296 def select_texture_coords(self, key: Union[int,str]) -> Any: 297 """Select one specific array to be used as texture coordinates.""" 298 if self.association == 0: 299 data = self.obj.dataset.GetPointData() 300 else: 301 vedo.logger.warning("texture coordinates are only available for point data") 302 return 303 304 if isinstance(key, int): 305 key = data.GetArrayName(key) 306 data.SetTCoords(data.GetArray(key)) 307 return self.obj
Select one specific array to be used as texture coordinates.
309 def select_normals(self, key: Union[int,str]) -> Any: 310 """Select one specific normal array by its name to make it the "active" one.""" 311 if self.association == 0: 312 data = self.obj.dataset.GetPointData() 313 self.obj.mapper.SetScalarModeToUsePointData() 314 else: 315 data = self.obj.dataset.GetCellData() 316 self.obj.mapper.SetScalarModeToUseCellData() 317 318 if isinstance(key, int): 319 key = data.GetArrayName(key) 320 data.SetActiveNormals(key) 321 return self.obj
Select one specific normal array by its name to make it the "active" one.
class CommonAlgorithms:
    """Common algorithms."""

    @property
    def pointdata(self):
        """
        Create and/or return a `numpy.array` associated to points (vertices).
        A data array can be indexed either as a string or by an integer number.
        E.g.: `myobj.pointdata["arrayname"]`

        Usage:

            `myobj.pointdata.keys()` to return the available data array names

            `myobj.pointdata.select(name)` to make this array the active one

            `myobj.pointdata.remove(name)` to remove this array
        """
        return DataArrayHelper(self, 0)

    @property
    def celldata(self):
        """
        Create and/or return a `numpy.array` associated to cells (faces).
        A data array can be indexed either as a string or by an integer number.
        E.g.: `myobj.celldata["arrayname"]`

        Usage:

            `myobj.celldata.keys()` to return the available data array names

            `myobj.celldata.select(name)` to make this array the active one

            `myobj.celldata.remove(name)` to remove this array
        """
        return DataArrayHelper(self, 1)

    @property
    def metadata(self):
        """
        Create and/or return a `numpy.array` associated to neither cells nor faces.
        A data array can be indexed either as a string or by an integer number.
        E.g.: `myobj.metadata["arrayname"]`

        Usage:

            `myobj.metadata.keys()` to return the available data array names

            `myobj.metadata.select(name)` to make this array the active one

            `myobj.metadata.remove(name)` to remove this array
        """
        return DataArrayHelper(self, 2)

    def memory_address(self) -> int:
        """
        Return a unique memory address integer which may serve as the ID of the
        object, or passed to c++ code.
        """
        # https://www.linkedin.com/pulse/speedup-your-code-accessing-python-vtk-objects-from-c-pletzer/
        # https://github.com/tfmoraes/polydata_connectivity
        # GetAddressAsString returns e.g. "Addr=0x7f..."; strip the prefix
        return int(self.dataset.GetAddressAsString("")[5:], 16)

    def memory_size(self) -> int:
        """Return the size in bytes of the object in memory."""
        return self.dataset.GetActualMemorySize()

    def modified(self) -> Self:
        """Use in conjunction with `tonumpy()` to update any modifications to the image array."""
        self.dataset.GetPointData().Modified()
        scals = self.dataset.GetPointData().GetScalars()
        if scals:
            scals.Modified()
        return self

    def box(self, scale=1, padding=0) -> "vedo.Mesh":
        """
        Return the bounding box as a new `Mesh` object.

        Arguments:
            scale : (float)
                box size can be scaled by a factor
            padding : (float, list)
                a constant padding can be added (can be a list `[padx,pady,padz]`)
        """
        b = self.bounds()
        if not utils.is_sequence(padding):
            padding = [padding, padding, padding]
        length, width, height = b[1] - b[0], b[3] - b[2], b[5] - b[4]
        tol = (length + width + height) / 30000  # useful for boxing text
        pos = [(b[0] + b[1]) / 2, (b[3] + b[2]) / 2, (b[5] + b[4]) / 2 - tol]
        bx = vedo.shapes.Box(
            pos,
            length * scale + padding[0],
            width * scale + padding[1],
            height * scale + padding[2],
            c="gray",
        )
        try:
            # inherit the visual properties of the boxed object, when present
            pr = vtki.vtkProperty()
            pr.DeepCopy(self.properties)
            bx.actor.SetProperty(pr)
            bx.properties = pr
        except (AttributeError, TypeError):
            pass
        bx.flat().lighting("off").wireframe(True)
        return bx

    def update_dataset(self, dataset, **kwargs) -> Self:
        """Update the dataset of the object with the provided VTK dataset."""
        self._update(dataset, **kwargs)
        return self

    def bounds(self) -> np.ndarray:
        """
        Get the object bounds.
        Returns a list in format `[xmin,xmax, ymin,ymax, zmin,zmax]`.
        """
        try:  # this is very slow for large meshes
            pts = self.vertices
            xmin, ymin, zmin = np.min(pts, axis=0)
            xmax, ymax, zmax = np.max(pts, axis=0)
            return np.array([xmin, xmax, ymin, ymax, zmin, zmax])
        except (AttributeError, ValueError):
            # fall back to the vtk-computed bounds
            return np.array(self.dataset.GetBounds())

    def xbounds(self, i=None) -> np.ndarray:
        """Get the bounds `[xmin,xmax]`. Can specify upper or lower with i (0,1)."""
        b = self.bounds()
        if i is not None:
            return b[i]
        return np.array([b[0], b[1]])

    def ybounds(self, i=None) -> np.ndarray:
        """Get the bounds `[ymin,ymax]`. Can specify upper or lower with i (0,1)."""
        b = self.bounds()
        if i == 0:
            return b[2]
        if i == 1:
            return b[3]
        return np.array([b[2], b[3]])

    def zbounds(self, i=None) -> np.ndarray:
        """Get the bounds `[zmin,zmax]`. Can specify upper or lower with i (0,1)."""
        b = self.bounds()
        if i == 0:
            return b[4]
        if i == 1:
            return b[5]
        return np.array([b[4], b[5]])

    def diagonal_size(self) -> float:
        """Get the length of the diagonal of the bounding box."""
        b = self.bounds()
        return np.sqrt((b[1] - b[0])**2 + (b[3] - b[2])**2 + (b[5] - b[4])**2)

    def average_size(self) -> float:
        """
        Calculate and return the average size of the object.
        This is the mean of the vertex distances from the center of mass.
        """
        coords = self.vertices
        # NOTE(review): np.mean is computed before the empty-input check below,
        # so an empty object still triggers a numpy warning — verify intent.
        cm = np.mean(coords, axis=0)
        if coords.shape[0] == 0:
            return 0.0
        cc = coords - cm
        return np.mean(np.linalg.norm(cc, axis=1))

    def center_of_mass(self) -> np.ndarray:
        """Get the center of mass of the object."""
        if isinstance(self, (vedo.RectilinearGrid, vedo.Volume)):
            # grid-like datasets: geometric center is the center of mass
            return np.array(self.dataset.GetCenter())
        cmf = vtki.new("CenterOfMass")
        cmf.SetInputData(self.dataset)
        cmf.Update()
        c = cmf.GetCenter()
        return np.array(c)

    def copy_data_from(self, obj: Any) -> Self:
        """Copy all data (point and cell data) from this input object"""
        self.dataset.GetPointData().PassData(obj.dataset.GetPointData())
        self.dataset.GetCellData().PassData(obj.dataset.GetCellData())
        self.pipeline = utils.OperationNode(
            "copy_data_from",
            parents=[self, obj],
            comment=f"{obj.__class__.__name__}",
            shape="note",
            c="#ccc5b9",
        )
        return self

    def inputdata(self):
        """Obsolete, use `.dataset` instead."""
        colors.printc("WARNING: 'inputdata()' is obsolete, use '.dataset' instead.", c="y")
        return self.dataset

    @property
    def npoints(self):
        """Retrieve the number of points (or vertices)."""
        return self.dataset.GetNumberOfPoints()

    @property
    def nvertices(self):
        """Retrieve the number of vertices (or points)."""
        return self.dataset.GetNumberOfPoints()

    @property
    def ncells(self):
        """Retrieve the number of cells."""
        return self.dataset.GetNumberOfCells()

    def points(self, pts=None):
        """Obsolete, use `self.vertices` or `self.coordinates` instead."""
        if pts is None:  ### getter

            if warnings["points_getter"]:
                # deprecation notice, printed only once (then silenced)
                colors.printc(warnings["points_getter"], c="y")
                warnings["points_getter"] = ""
            return self.vertices

        else:  ### setter

            if warnings["points_setter"]:
                colors.printc(warnings["points_setter"], c="y")
                warnings["points_setter"] = ""

            pts = np.asarray(pts, dtype=np.float32)

            if pts.ndim == 1:
                ### getter by point index ###################
                indices = pts.astype(int)
                vpts = self.dataset.GetPoints()
                arr = utils.vtk2numpy(vpts.GetData())
                return arr[indices]  ###########

            ### setter ####################################
            if pts.shape[1] == 2:
                # promote 2D input to 3D by appending a zero z-column
                pts = np.c_[pts, np.zeros(pts.shape[0], dtype=np.float32)]
            arr = utils.numpy2vtk(pts, dtype=np.float32)

            vpts = self.dataset.GetPoints()
            vpts.SetData(arr)
            vpts.Modified()
            # reset mesh to identity matrix position/rotation:
            self.point_locator = None
            self.cell_locator = None
            self.line_locator = None
            self.transform = LinearTransform()
            return self

    @property
    def cell_centers(self):
        """
        Get the coordinates of the cell centers.

        Examples:
            - [delaunay2d.py](https://github.com/marcomusy/vedo/tree/master/examples/basic/delaunay2d.py)

        See also: `CellCenters()`.
        """
        vcen = vtki.new("CellCenters")
        vcen.CopyArraysOff()
        vcen.SetInputData(self.dataset)
        vcen.Update()
        return utils.vtk2numpy(vcen.GetOutput().GetPoints().GetData())

    @property
    def lines(self):
        """
        Get lines connectivity ids as a python array
        formatted as `[[id0,id1], [id3,id4], ...]`

        See also: `lines_as_flat_array()`.
        """
        # Get cell connettivity ids as a 1D array. The vtk format is:
        # [nids1, id0 ... idn, niids2, id0 ... idm, etc].
        try:
            arr1d = utils.vtk2numpy(self.dataset.GetLines().GetData())
        except AttributeError:
            return np.array([])
        i = 0
        conn = []
        n = len(arr1d)
        for _ in range(n):
            # arr1d[i] is the number of ids in this cell; the ids follow it
            cell = [arr1d[i + k + 1] for k in range(arr1d[i])]
            conn.append(cell)
            i += arr1d[i] + 1
            if i >= n:
                break

        return conn  # cannot always make a numpy array of it!

    @property
    def lines_as_flat_array(self):
        """
        Get lines connectivity ids as a 1D numpy array.
        Format is e.g. [2, 10,20, 3, 10,11,12, 2, 70,80, ...]

        See also: `lines()`.
        """
        try:
            return utils.vtk2numpy(self.dataset.GetLines().GetData())
        except AttributeError:
            return np.array([])

    def mark_boundaries(self) -> Self:
        """
        Mark cells and vertices if they lie on a boundary.
        A new array called `BoundaryCells` is added to the object.
        """
        mb = vtki.new("MarkBoundaryFilter")
        mb.SetInputData(self.dataset)
        mb.Update()
        self.dataset.DeepCopy(mb.GetOutput())
        self.pipeline = utils.OperationNode("mark_boundaries", parents=[self])
        return self

    def find_cells_in_bounds(self, xbounds=(), ybounds=(), zbounds=()) -> np.ndarray:
        """
        Find cells that are within the specified bounds.
        """
        try:
            # a vedo object can be passed as xbounds: use its bounding box
            xbounds = list(xbounds.bounds())
        except AttributeError:
            pass

        if len(xbounds) == 6:
            bnds = xbounds
        else:
            # start from the object's own bounds and override per axis
            bnds = list(self.bounds())
            if len(xbounds) == 2:
                bnds[0] = xbounds[0]
                bnds[1] = xbounds[1]
            if len(ybounds) == 2:
                bnds[2] = ybounds[0]
                bnds[3] = ybounds[1]
            if len(zbounds) == 2:
                bnds[4] = zbounds[0]
                bnds[5] = zbounds[1]

        cell_ids = vtki.vtkIdList()
        if not self.cell_locator:
            # lazily build and cache the locator for subsequent queries
            self.cell_locator = vtki.new("CellTreeLocator")
            self.cell_locator.SetDataSet(self.dataset)
            self.cell_locator.BuildLocator()
        self.cell_locator.FindCellsWithinBounds(bnds, cell_ids)
        cids = []
        for i in range(cell_ids.GetNumberOfIds()):
            cid = cell_ids.GetId(i)
            cids.append(cid)
        return np.array(cids)

    def find_cells_along_line(self, p0, p1, tol=0.001) -> np.ndarray:
        """
        Find cells that are intersected by a line segment.
        """
        cell_ids = vtki.vtkIdList()
        if not self.cell_locator:
            self.cell_locator = vtki.new("CellTreeLocator")
            self.cell_locator.SetDataSet(self.dataset)
            self.cell_locator.BuildLocator()
        self.cell_locator.FindCellsAlongLine(p0, p1, tol, cell_ids)
        cids = []
        for i in range(cell_ids.GetNumberOfIds()):
            cid = cell_ids.GetId(i)
            cids.append(cid)
        return np.array(cids)

    def find_cells_along_plane(self, origin, normal, tol=0.001) -> np.ndarray:
        """
        Find cells that are intersected by a plane.
        """
        cell_ids = vtki.vtkIdList()
        if not self.cell_locator:
            self.cell_locator = vtki.new("CellTreeLocator")
            self.cell_locator.SetDataSet(self.dataset)
            self.cell_locator.BuildLocator()
        self.cell_locator.FindCellsAlongPlane(origin, normal, tol, cell_ids)
        cids = []
        for i in range(cell_ids.GetNumberOfIds()):
            cid = cell_ids.GetId(i)
            cids.append(cid)
        return np.array(cids)

    def keep_cell_types(self, types=()):
        """
        Extract cells of a specific type.

        Check the VTK cell types here:
        https://vtk.org/doc/nightly/html/vtkCellType_8h.html
        """
        fe = vtki.new("ExtractCellsByType")
        fe.SetInputData(self.dataset)
        for t in types:
            try:
                if utils.is_integer(t):
                    it = t
                else:
                    # accept type names, case-insensitively
                    it = vtki.cell_types[t.upper()]
            except KeyError:
                vedo.logger.error(f"Cell type '{t}' not recognized")
                continue
            fe.AddCellType(it)
        fe.Update()
        self._update(fe.GetOutput())
        return self

    def map_cells_to_points(self, arrays=(), move=False) -> Self:
        """
        Interpolate cell data (i.e., data specified per cell or face)
        into point data (i.e., data specified at each vertex).
        The method of transformation is based on averaging the data values
        of all cells using a particular point.

        A custom list of arrays to be mapped can be passed in input.

        Set `move=True` to delete the original `celldata` array.
        """
        c2p = vtki.new("CellDataToPointData")
        c2p.SetInputData(self.dataset)
        if not move:
            c2p.PassCellDataOn()
        if arrays:
            # map only the requested arrays
            c2p.ClearCellDataArrays()
            c2p.ProcessAllArraysOff()
            for arr in arrays:
                c2p.AddCellDataArray(arr)
        else:
            c2p.ProcessAllArraysOn()
        c2p.Update()
        self._update(c2p.GetOutput(), reset_locators=False)
        self.mapper.SetScalarModeToUsePointData()
        self.pipeline = utils.OperationNode("map_cells_to_points", parents=[self])
        return self

    @property
    def vertices(self):
        """Return the vertices (points) coordinates."""
        try:
            # for polydata and unstructured grid
            varr = self.dataset.GetPoints().GetData()
        except (AttributeError, TypeError):
            try:
                # for RectilinearGrid, StructuredGrid
                vpts = vtki.vtkPoints()
                self.dataset.GetPoints(vpts)
                varr = vpts.GetData()
            except (AttributeError, TypeError):
                try:
                    # for ImageData
                    v2p = vtki.new("ImageToPoints")
                    v2p.SetInputData(self.dataset)
                    v2p.Update()
                    varr = v2p.GetOutput().GetPoints().GetData()
                except AttributeError:
                    return np.array([])

        return utils.vtk2numpy(varr)

    # setter
    @vertices.setter
    def vertices(self, pts):
        """Set vertices (points) coordinates."""
        pts = utils.make3d(pts)
        arr = utils.numpy2vtk(pts, dtype=np.float32)
        try:
            vpts = self.dataset.GetPoints()
            vpts.SetData(arr)
            vpts.Modified()
        except (AttributeError, TypeError):
            vedo.logger.error(f"Cannot set vertices for {type(self)}")
            return self
        # reset mesh to identity matrix position/rotation:
        self.point_locator = None
        self.cell_locator = None
        self.line_locator = None
        self.transform = LinearTransform()

    @property
    def coordinates(self):
        """Return the vertices (points) coordinates. Same as `vertices`."""
        return self.vertices

    @coordinates.setter
    def coordinates(self, pts):
        """Set vertices (points) coordinates. Same as `vertices`."""
        self.vertices = pts

    @property
    def cells_as_flat_array(self):
        """
        Get cell connectivity ids as a 1D numpy array.
        Format is e.g. [3, 10,20,30 4, 10,11,12,13 ...]
        """
        try:
            # valid for unstructured grid
            arr1d = utils.vtk2numpy(self.dataset.GetCells().GetData())
        except AttributeError:
            # valid for polydata
            arr1d = utils.vtk2numpy(self.dataset.GetPolys().GetData())
        return arr1d

    @property
    def cells(self):
        """
        Get the cells connectivity ids as a numpy array.

        The output format is: `[[id0 ... idn], [id0 ... idm], etc]`.
        """
        try:
            # valid for unstructured grid
            arr1d = utils.vtk2numpy(self.dataset.GetCells().GetData())
        except AttributeError:
            try:
                # valid for polydata
                arr1d = utils.vtk2numpy(self.dataset.GetPolys().GetData())
            except AttributeError:
                vedo.logger.warning(f"Cannot get cells for {type(self)}")
                return np.array([])

        # Get cell connettivity ids as a 1D array. vtk format is:
        # [nids1, id0 ... idn, niids2, id0 ... idm, etc].
        i = 0
        conn = []
        n = len(arr1d)
        if n:
            while True:
                # arr1d[i] is the id count for this cell; its ids follow
                cell = [arr1d[i + k] for k in range(1, arr1d[i] + 1)]
                conn.append(cell)
                i += arr1d[i] + 1
                if i >= n:
                    break
        return conn

    def map_points_to_cells(self, arrays=(), move=False) -> Self:
        """
        Interpolate point data (i.e., data specified per point or vertex)
        into cell data (i.e., data specified per cell).
        The method of transformation is based on averaging the data values
        of all points defining a particular cell.

        A custom list of arrays to be mapped can be passed in input.

        Set `move=True` to delete the original `pointdata` array.
920 921 Examples: 922 - [mesh_map2cell.py](https://github.com/marcomusy/vedo/tree/master/examples/basic/mesh_map2cell.py) 923 """ 924 p2c = vtki.new("PointDataToCellData") 925 p2c.SetInputData(self.dataset) 926 if not move: 927 p2c.PassPointDataOn() 928 if arrays: 929 p2c.ClearPointDataArrays() 930 p2c.ProcessAllArraysOff() 931 for arr in arrays: 932 p2c.AddPointDataArray(arr) 933 else: 934 p2c.ProcessAllArraysOn() 935 p2c.Update() 936 self._update(p2c.GetOutput(), reset_locators=False) 937 self.mapper.SetScalarModeToUseCellData() 938 self.pipeline = utils.OperationNode("map_points_to_cells", parents=[self]) 939 return self 940 941 def resample_data_from(self, source, tol=None, categorical=False) -> Self: 942 """ 943 Resample point and cell data from another dataset. 944 The output has the same structure but its point data have 945 the resampled values from target. 946 947 Use `tol` to set the tolerance used to compute whether 948 a point in the source is in a cell of the current object. 949 Points without resampled values, and their cells, are marked as blank. 950 If the data is categorical, then the resulting data will be determined 951 by a nearest neighbor interpolation scheme. 
        Example:
        ```python
        from vedo import *
        m1 = Mesh(dataurl+'bunny.obj')#.add_gaussian_noise(0.1)
        pts = m1.vertices
        ces = m1.cell_centers
        m1.pointdata["xvalues"] = np.power(pts[:,0], 3)
        m1.celldata["yvalues"] = np.power(ces[:,1], 3)
        m2 = Mesh(dataurl+'bunny.obj')
        m2.resample_data_from(m1)
        # print(m2.pointdata["xvalues"])
        show(m1, m2 , N=2, axes=1)
        ```
        """
        rs = vtki.new("ResampleWithDataSet")
        rs.SetInputData(self.dataset)
        rs.SetSourceData(source.dataset)

        rs.SetPassPointArrays(True)
        rs.SetPassCellArrays(True)
        rs.SetPassFieldArrays(True)
        rs.SetCategoricalData(categorical)

        # let VTK estimate the tolerance unless one is given explicitly
        rs.SetComputeTolerance(True)
        if tol:
            rs.SetComputeTolerance(False)
            rs.SetTolerance(tol)
        rs.Update()
        self._update(rs.GetOutput(), reset_locators=False)
        self.pipeline = utils.OperationNode(
            "resample_data_from",
            comment=f"{source.__class__.__name__}",
            parents=[self, source],
        )
        return self

    def interpolate_data_from(
        self,
        source,
        radius=None,
        n=None,
        kernel="shepard",
        exclude=("Normals",),
        on="points",
        null_strategy=1,
        null_value=0,
    ) -> Self:
        """
        Interpolate over source to port its data onto the current object using various kernels.

        If n (number of closest points to use) is set then radius value is ignored.

        Check out also:
            `probe()` which in many cases can be faster.

        Arguments:
            kernel : (str)
                available kernels are [shepard, gaussian, linear]
            null_strategy : (int)
                specify a strategy to use when encountering a "null" point
                during the interpolation process. Null points occur when the local neighborhood
                (of nearby points to interpolate from) is empty.

                - Case 0: an output array is created that marks points
                  as being valid (=1) or null (invalid =0), and the null_value is set as well
                - Case 1: the output data value(s) are set to the provided null_value
                - Case 2: simply use the closest point to perform the interpolation.
            null_value : (float)
                see above.

        Examples:
            - [interpolate_scalar1.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/interpolate_scalar1.py)
            - [interpolate_scalar3.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/interpolate_scalar3.py)
            - [interpolate_scalar4.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/interpolate_scalar4.py)
            - [image_probe.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/image_probe.py)

            ![](https://vedo.embl.es/images/advanced/interpolateMeshArray.png)
        """
        if radius is None and not n:
            vedo.logger.error("in interpolate_data_from(): please set either radius or n")
            raise RuntimeError

        if on == "points":
            points = source.dataset
        elif on == "cells":
            # convert the source cell data to point data first
            c2p = vtki.new("CellDataToPointData")
            c2p.SetInputData(source.dataset)
            c2p.Update()
            points = c2p.GetOutput()
        else:
            vedo.logger.error("in interpolate_data_from(), on must be on points or cells")
            raise RuntimeError()

        locator = vtki.new("PointLocator")
        locator.SetDataSet(points)
        locator.BuildLocator()

        if kernel.lower() == "shepard":
            kern = vtki.new("ShepardKernel")
            kern.SetPowerParameter(2)
        elif kernel.lower() == "gaussian":
            kern = vtki.new("GaussianKernel")
            kern.SetSharpness(2)
        elif kernel.lower() == "linear":
            kern = vtki.new("LinearKernel")
        else:
            vedo.logger.error("available kernels are: [shepard, gaussian, linear]")
            raise RuntimeError()

        # n (closest points) takes precedence over radius
        if n:
            kern.SetNumberOfPoints(n)
            kern.SetKernelFootprintToNClosest()
        else:
            kern.SetRadius(radius)
            kern.SetKernelFootprintToRadius()

        interpolator = vtki.new("PointInterpolator")
        interpolator.SetInputData(self.dataset)
        interpolator.SetSourceData(points)
        interpolator.SetKernel(kern)
        interpolator.SetLocator(locator)
        interpolator.PassFieldArraysOn()
        interpolator.SetNullPointsStrategy(null_strategy)
        interpolator.SetNullValue(null_value)
        interpolator.SetValidPointsMaskArrayName("ValidPointMask")
        for ex in exclude:
            interpolator.AddExcludedArray(ex)
        interpolator.Update()

        if on == "cells":
            # map the interpolated point data back onto the cells
            p2c = vtki.new("PointDataToCellData")
            p2c.SetInputData(interpolator.GetOutput())
            p2c.Update()
            cpoly = p2c.GetOutput()
        else:
            cpoly = interpolator.GetOutput()

        self._update(cpoly, reset_locators=False)

        self.pipeline = utils.OperationNode("interpolate_data_from", parents=[self, source])
        return self

    def add_ids(self) -> Self:
        """
        Generate point and cell ids arrays.

        Two new arrays are added to the mesh: `PointID` and `CellID`.
        """
        ids = vtki.new("IdFilter")
        ids.SetInputData(self.dataset)
        ids.PointIdsOn()
        ids.CellIdsOn()
        ids.FieldDataOff()
        ids.SetPointIdsArrayName("PointID")
        ids.SetCellIdsArrayName("CellID")
        ids.Update()
        self._update(ids.GetOutput(), reset_locators=False)
        self.pipeline = utils.OperationNode("add_ids", parents=[self])
        return self

    def gradient(self, input_array=None, on="points", fast=False) -> np.ndarray:
        """
        Compute and return the gradient of the active scalar field as a numpy array.

        Arguments:
            input_array : (str)
                array of the scalars to compute the gradient,
                if None the current active array is selected
            on : (str)
                compute either on 'points' or 'cells' data
            fast : (bool)
                if True, will use a less accurate algorithm
                that performs fewer derivative calculations (and is therefore faster).

        Examples:
            - [isolines.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/isolines.py)

            ![](https://user-images.githubusercontent.com/32848391/72433087-f00a8780-3798-11ea-9778-991f0abeca70.png)
        """
        gra = vtki.new("GradientFilter")
        if on.startswith("p"):
            varr = self.dataset.GetPointData()
            tp = vtki.vtkDataObject.FIELD_ASSOCIATION_POINTS
        elif on.startswith("c"):
            varr = self.dataset.GetCellData()
            tp = vtki.vtkDataObject.FIELD_ASSOCIATION_CELLS
        else:
            vedo.logger.error(f"in gradient: unknown option {on}")
            raise RuntimeError

        if input_array is None:
            # fall back to the currently active scalars
            if varr.GetScalars():
                input_array = varr.GetScalars().GetName()
            else:
                vedo.logger.error(f"in gradient: no scalars found for {on}")
                raise RuntimeError

        gra.SetInputData(self.dataset)
        gra.SetInputScalars(tp, input_array)
        gra.SetResultArrayName("Gradient")
        gra.SetFasterApproximation(fast)
        gra.ComputeDivergenceOff()
        gra.ComputeVorticityOff()
        gra.ComputeGradientOn()
        gra.Update()
        if on.startswith("p"):
            gvecs = utils.vtk2numpy(gra.GetOutput().GetPointData().GetArray("Gradient"))
        else:
            gvecs = utils.vtk2numpy(gra.GetOutput().GetCellData().GetArray("Gradient"))
        return gvecs

    def divergence(self, array_name=None, on="points", fast=False) -> np.ndarray:
        """
        Compute and return the divergence of a vector field as a numpy array.

        Arguments:
            array_name : (str)
                name of the array of vectors to compute the divergence,
                if None the current active array is selected
            on : (str)
                compute either on 'points' or 'cells' data
            fast : (bool)
                if True, will use a less accurate algorithm
                that performs fewer derivative calculations (and is therefore faster).
        """
        div = vtki.new("GradientFilter")
        if on.startswith("p"):
            varr = self.dataset.GetPointData()
            tp = vtki.vtkDataObject.FIELD_ASSOCIATION_POINTS
        elif on.startswith("c"):
            varr = self.dataset.GetCellData()
            tp = vtki.vtkDataObject.FIELD_ASSOCIATION_CELLS
        else:
            vedo.logger.error(f"in divergence(): unknown option {on}")
            raise RuntimeError

        if array_name is None:
            # fall back to the currently active vectors
            if varr.GetVectors():
                array_name = varr.GetVectors().GetName()
            else:
                vedo.logger.error(f"in divergence(): no vectors found for {on}")
                raise RuntimeError

        div.SetInputData(self.dataset)
        div.SetInputScalars(tp, array_name)
        div.ComputeDivergenceOn()
        div.ComputeGradientOff()
        div.ComputeVorticityOff()
        div.SetDivergenceArrayName("Divergence")
        div.SetFasterApproximation(fast)
        div.Update()
        if on.startswith("p"):
            dvecs = utils.vtk2numpy(div.GetOutput().GetPointData().GetArray("Divergence"))
        else:
            dvecs = utils.vtk2numpy(div.GetOutput().GetCellData().GetArray("Divergence"))
        return dvecs

    def vorticity(self, array_name=None, on="points", fast=False) -> np.ndarray:
        """
        Compute and return the vorticity of a vector field as a numpy array.

        Arguments:
            array_name : (str)
                name of the array to compute the vorticity,
                if None the current active array is selected
            on : (str)
                compute either on 'points' or 'cells' data
            fast : (bool)
                if True, will use a less accurate algorithm
                that performs fewer derivative calculations (and is therefore faster).
        """
        vort = vtki.new("GradientFilter")
        if on.startswith("p"):
            varr = self.dataset.GetPointData()
            tp = vtki.vtkDataObject.FIELD_ASSOCIATION_POINTS
        elif on.startswith("c"):
            varr = self.dataset.GetCellData()
            tp = vtki.vtkDataObject.FIELD_ASSOCIATION_CELLS
        else:
            vedo.logger.error(f"in vorticity(): unknown option {on}")
            raise RuntimeError

        if array_name is None:
            # fall back to the currently active vectors
            if varr.GetVectors():
                array_name = varr.GetVectors().GetName()
            else:
                vedo.logger.error(f"in vorticity(): no vectors found for {on}")
                raise RuntimeError

        vort.SetInputData(self.dataset)
        vort.SetInputScalars(tp, array_name)
        vort.ComputeDivergenceOff()
        vort.ComputeGradientOff()
        vort.ComputeVorticityOn()
        vort.SetVorticityArrayName("Vorticity")
        vort.SetFasterApproximation(fast)
        vort.Update()
        if on.startswith("p"):
            vvecs = utils.vtk2numpy(vort.GetOutput().GetPointData().GetArray("Vorticity"))
        else:
            vvecs = utils.vtk2numpy(vort.GetOutput().GetCellData().GetArray("Vorticity"))
        return vvecs

    def probe(self, source) -> Self:
        """
        Takes a data set and probes its scalars at the specified points in space.

        Note that a mask is also output with valid/invalid points which can be accessed
        with `mesh.pointdata['ValidPointMask']`.

        Check out also:
            `interpolate_data_from()`

        Examples:
            - [probe_points.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/probe_points.py)

            ![](https://vedo.embl.es/images/volumetric/probePoints.png)
        """
        probe_filter = vtki.new("ProbeFilter")
        probe_filter.SetSourceData(source.dataset)
        probe_filter.SetInputData(self.dataset)
        probe_filter.Update()
        self._update(probe_filter.GetOutput(), reset_locators=False)
        self.pipeline = utils.OperationNode("probe", parents=[self, source])
        # expose the VTK mask under a friendlier name
        self.pointdata.rename("vtkValidPointMask", "ValidPointMask")
        return self

    def compute_cell_size(self) -> Self:
        """
        Add to this object a cell data array
        containing the area, volume and edge length
        of the cells (when applicable to the object type).

        Array names are: `Area`, `Volume`, `Length`.
        """
        csf = vtki.new("CellSizeFilter")
        csf.SetInputData(self.dataset)
        csf.SetComputeArea(1)
        csf.SetComputeVolume(1)
        csf.SetComputeLength(1)
        csf.SetComputeVertexCount(0)
        csf.SetAreaArrayName("Area")
        csf.SetVolumeArrayName("Volume")
        csf.SetLengthArrayName("Length")
        csf.Update()
        self._update(csf.GetOutput(), reset_locators=False)
        return self

    def generate_random_data(self) -> Self:
        """Fill a dataset with random attributes"""
        gen = vtki.new("RandomAttributeGenerator")
        gen.SetInputData(self.dataset)
        gen.GenerateAllDataOn()
        gen.SetDataTypeToFloat()
        gen.GeneratePointNormalsOff()
        gen.GeneratePointTensorsOn()
        gen.GenerateCellScalarsOn()
        gen.Update()
        self._update(gen.GetOutput(), reset_locators=False)
        self.pipeline = utils.OperationNode("generate_random_data", parents=[self])
        return self

    def integrate_data(self) -> dict:
        """
        Integrate point and cell data arrays while computing length,
        area or volume of the domain. It works for 1D, 2D or 3D cells.

        For volumetric datasets, this filter ignores all but 3D cells.
        It will not compute the volume contained in a closed surface.

        Returns a dictionary with keys: `pointdata`, `celldata`, `metadata`,
        which contain the integration result for the corresponding attributes.

        Examples:
            ```python
            from vedo import *
            surf = Sphere(res=100)
            surf.pointdata['scalars'] = np.ones(surf.npoints)
            data = surf.integrate_data()
            print(data['pointdata']['scalars'], "is equal to 4pi", 4*np.pi)
            ```

            ```python
            from vedo import *

            xcoords1 = np.arange(0, 2.2, 0.2)
            xcoords2 = sqrt(np.arange(0, 4.2, 0.2))

            ycoords = np.arange(0, 1.2, 0.2)

            surf1 = Grid(s=(xcoords1, ycoords)).rotate_y(-45).lw(2)
            surf2 = Grid(s=(xcoords2, ycoords)).rotate_y(-45).lw(2)

            surf1.pointdata['scalars'] = surf1.vertices[:,2]
            surf2.pointdata['scalars'] = surf2.vertices[:,2]

            data1 = surf1.integrate_data()
            data2 = surf2.integrate_data()

            print(data1['pointdata']['scalars'],
                "is equal to",
                data2['pointdata']['scalars'],
                "even if the grids are different!",
                "(= the volume under the surface)"
            )
            show(surf1, surf2, N=2, axes=1).close()
            ```
        """
        vinteg = vtki.new("IntegrateAttributes")
        vinteg.SetInputData(self.dataset)
        vinteg.Update()
        ugrid = vedo.UnstructuredGrid(vinteg.GetOutput())
        data = dict(
            pointdata=ugrid.pointdata.todict(),
            celldata=ugrid.celldata.todict(),
            metadata=ugrid.metadata.todict(),
        )
        return data

    def write(self, filename, binary=True) -> None:
        """Write object to file."""
        out = vedo.file_io.write(self, filename, binary)
        out.pipeline = utils.OperationNode(
            "write", parents=[self], comment=filename[:15], shape="folder", c="#8a817c"
        )

    def tomesh(self, bounds=(), shrink=0) -> "vedo.Mesh":
        """
        Extract boundary geometry from dataset (or convert data to polygonal type).

        Two new arrays are added to the mesh: `OriginalCellIds` and `OriginalPointIds`
        to keep track of the original mesh elements.

        Arguments:
            bounds : (list)
                specify a sub-region to extract
            shrink : (float)
                shrink the cells to a fraction of their original size
        """
        geo = vtki.new("GeometryFilter")

        if shrink:
            sf = vtki.new("ShrinkFilter")
            sf.SetInputData(self.dataset)
            sf.SetShrinkFactor(shrink)
            sf.Update()
            geo.SetInputData(sf.GetOutput())
        else:
            geo.SetInputData(self.dataset)

        geo.SetPassThroughCellIds(1)
        geo.SetPassThroughPointIds(1)
        geo.SetOriginalCellIdsName("OriginalCellIds")
        geo.SetOriginalPointIdsName("OriginalPointIds")
        geo.SetNonlinearSubdivisionLevel(1)
        # geo.MergingOff() # crashes on StructuredGrids
        if bounds:
            geo.SetExtent(bounds)
            geo.ExtentClippingOn()
        geo.Update()
        msh = vedo.mesh.Mesh(geo.GetOutput())
        msh.pipeline = utils.OperationNode("tomesh", parents=[self], c="#9e2a2b")
        return msh

    def signed_distance(self, dims=(20, 20, 20), bounds=None, invert=False, max_radius=None) -> "vedo.Volume":
        """
        Compute the `Volume` object whose voxels contains the signed distance from
        the object. The calling object must have "Normals" defined.

        Arguments:
            bounds : (list, actor)
                bounding box sizes
            dims : (list)
                dimensions (nr. of voxels) of the output volume.
            invert : (bool)
                flip the sign
            max_radius : (float)
                specify how far out to propagate distance calculation

        Examples:
            - [distance2mesh.py](https://github.com/marcomusy/vedo/blob/master/examples/basic/distance2mesh.py)

            ![](https://vedo.embl.es/images/basic/distance2mesh.png)
        """
        if bounds is None:
            bounds = self.bounds()
        if max_radius is None:
            # default propagation radius: half the bounding-box diagonal
            max_radius = self.diagonal_size() / 2
        dist = vtki.new("SignedDistance")
        dist.SetInputData(self.dataset)
        dist.SetRadius(max_radius)
        dist.SetBounds(bounds)
        dist.SetDimensions(dims)
        dist.Update()
        img = dist.GetOutput()
        if invert:
            # flip the sign by multiplying every voxel by -1
            mat = vtki.new("ImageMathematics")
            mat.SetInput1Data(img)
            mat.SetOperationToMultiplyByK()
            mat.SetConstantK(-1)
            mat.Update()
            img = mat.GetOutput()

        vol = vedo.Volume(img)
        vol.name = "SignedDistanceVolume"
        vol.pipeline = utils.OperationNode(
            "signed_distance",
            parents=[self],
            comment=f"dims={tuple(vol.dimensions())}",
            c="#e9c46a:#0096c7",
        )
        return vol

    def unsigned_distance(
            self, dims=(25,25,25), bounds=(), max_radius=0, cap_value=0) -> "vedo.Volume":
        """
        Compute the `Volume` object whose voxels contains the unsigned distance.
        """
        dist = vtki.new("UnsignedDistance")
        dist.SetInputData(self.dataset)
        dist.SetDimensions(dims)

        if len(bounds) == 6:
            dist.SetBounds(bounds)
        # elif bounds == "auto":
        #     dist.AdjustBoundsOn()
        else:
            dist.SetBounds(self.bounds())
        if not max_radius:
            # default propagation radius: a tenth of the bounding-box diagonal
            max_radius = self.diagonal_size() / 10
        dist.SetRadius(max_radius)

        if self.point_locator:
            dist.SetLocator(self.point_locator)

        if cap_value is not None:
            dist.CappingOn()
            dist.SetCapValue(cap_value)
        dist.SetOutputScalarTypeToFloat()
        dist.Update()
        vol = vedo.Volume(dist.GetOutput())
        vol.name = "UnsignedDistanceVolume"
        vol.pipeline = utils.OperationNode(
            "unsigned_distance", parents=[self], c="#e9c46a:#0096c7")
        return vol

    def smooth_data(self,
            niter=10, relaxation_factor=0.1, strategy=0, mask=None,
            exclude=("Normals", "TextureCoordinates"),
        ) -> Self:
        """
        Smooth point attribute data using distance weighted Laplacian kernel.

        The effect is to blur regions of high variation and emphasize low variation regions.

        Arguments:
            niter : (int)
                number of iterations
            relaxation_factor : (float)
                relaxation factor controlling the amount of Laplacian smoothing applied
            strategy : (int)
                strategy to use for Laplacian smoothing
                    - 0: use all points, all point data attributes are smoothed
                    - 1: smooth all point attribute data except those on the boundary
                    - 2: only point data connected to a boundary point are smoothed
            mask : (str, np.ndarray)
                array to be used as a mask (ignore then the strategy keyword)
            exclude : (list)
                list of arrays to be excluded from smoothing
        """
        try:
            # vtkAttributeSmoothingFilter is only available in VTK >= 9.3
            saf = vtki.new("AttributeSmoothingFilter")
        except:
            # NOTE(review): bare except also hides unrelated errors; the message
            # below contains a typo ("avaialble") — it is a runtime string.
            vedo.logger.error("smooth_data() only avaialble in VTK>=9.3.0")
            return self
        saf.SetInputData(self.dataset)
        saf.SetRelaxationFactor(relaxation_factor)
        saf.SetNumberOfIterations(niter)

        for ex in exclude:
            saf.AddExcludedArray(ex)

        saf.SetWeightsTypeToDistance2()

        saf.SetSmoothingStrategy(strategy)
        if mask is not None:
            # an explicit mask overrides the strategy keyword
            saf.SetSmoothingStrategyToSmoothingMask()
            if isinstance(mask, str):
                mask_ = self.dataset.GetPointData().GetArray(mask)
                if not mask_:
                    vedo.logger.error(f"smooth_data(): mask array {mask} not found")
                    return self
                mask_array = vtki.vtkUnsignedCharArray()
                mask_array.ShallowCopy(mask_)
                mask_array.SetName(mask_.GetName())
            else:
                mask_array = utils.numpy2vtk(mask, dtype=np.uint8)
            saf.SetSmoothingMask(mask_array)

        saf.Update()

        self._update(saf.GetOutput())
        self.pipeline = utils.OperationNode(
            "smooth_data", comment=f"strategy {strategy}", parents=[self], c="#9e2a2b"
        )
        return self

    def compute_streamlines(
            self,
            seeds: Any,
            integrator="rk4",
            direction="forward",
            initial_step_size=None,
            max_propagation=None,
            max_steps=10000,
            step_length=0,
            surface_constrained=False,
            compute_vorticity=False,
        ) -> Union["vedo.Lines", None]:
        """
        Integrate a vector field to generate streamlines.

        Arguments:
            seeds : (Mesh, Points, list)
                starting points of the streamlines
            integrator : (str)
                type of integration method to be used:
                    - "rk2" (Runge-Kutta 2)
                    - "rk4" (Runge-Kutta 4)
                    - "rk45" (Runge-Kutta 45)
            direction : (str)
                direction of integration, either "forward", "backward" or "both"
            initial_step_size : (float)
                initial step size used for line integration
            max_propagation : (float)
                maximum length of a streamline expressed in absolute units
            max_steps : (int)
                maximum number of steps for a streamline
            step_length : (float)
                maximum length of a step expressed in absolute units
            surface_constrained : (bool)
                whether to stop integrating when the streamline leaves the surface
            compute_vorticity : (bool)
                whether to compute the vorticity at each streamline point
        """
        b = self.dataset.GetBounds()
        # average extent of the bounding box, used to derive sensible defaults
        size = (b[5]-b[4] + b[3]-b[2] + b[1]-b[0]) / 3
        if initial_step_size is None:
            initial_step_size = size / 1000.0

        if max_propagation is None:
            max_propagation = size * 2

        if utils.is_sequence(seeds):
            seeds = vedo.Points(seeds)

        sti = vtki.new("StreamTracer")
        sti.SetSourceData(seeds.dataset)
        if isinstance(self, vedo.RectilinearGrid):
            # vtkStreamTracer does not accept rectilinear grids directly
            sti.SetInputData(vedo.UnstructuredGrid(self.dataset).dataset)
        else:
            sti.SetInputDataObject(self.dataset)

        sti.SetInitialIntegrationStep(initial_step_size)
        sti.SetComputeVorticity(compute_vorticity)
        sti.SetMaximumNumberOfSteps(max_steps)
        sti.SetMaximumPropagation(max_propagation)
        sti.SetSurfaceStreamlines(surface_constrained)
        if step_length:
            sti.SetMaximumIntegrationStep(step_length)

        if "for" in direction:
            sti.SetIntegrationDirectionToForward()
        elif "back" in direction:
            sti.SetIntegrationDirectionToBackward()
        elif "both" in direction:
            sti.SetIntegrationDirectionToBoth()
        else:
            vedo.logger.error(f"in compute_streamlines(), unknown direction {direction}")
            return None

        if integrator == "rk2":
            sti.SetIntegratorTypeToRungeKutta2()
        elif integrator == "rk4":
            sti.SetIntegratorTypeToRungeKutta4()
        elif integrator == "rk45":
            sti.SetIntegratorTypeToRungeKutta45()
        else:
            vedo.logger.error(f"in compute_streamlines(), unknown integrator {integrator}")
            return None

        sti.Update()

        stlines = vedo.shapes.Lines(sti.GetOutput(), lw=4)
        stlines.name = "StreamLines"
        self.pipeline = utils.OperationNode(
            "compute_streamlines", comment=f"{integrator}", parents=[self, seeds], c="#9e2a2b"
        )
        return stlines
Common algorithms.
379 @property 380 def pointdata(self): 381 """ 382 Create and/or return a `numpy.array` associated to points (vertices). 383 A data array can be indexed either as a string or by an integer number. 384 E.g.: `myobj.pointdata["arrayname"]` 385 386 Usage: 387 388 `myobj.pointdata.keys()` to return the available data array names 389 390 `myobj.pointdata.select(name)` to make this array the active one 391 392 `myobj.pointdata.remove(name)` to remove this array 393 """ 394 return DataArrayHelper(self, 0)
Create and/or return a numpy.array
associated to points (vertices).
A data array can be indexed either as a string or by an integer number.
E.g.: myobj.pointdata["arrayname"]
Usage:
myobj.pointdata.keys()
to return the available data array names
myobj.pointdata.select(name)
to make this array the active one
myobj.pointdata.remove(name)
to remove this array
396 @property 397 def celldata(self): 398 """ 399 Create and/or return a `numpy.array` associated to cells (faces). 400 A data array can be indexed either as a string or by an integer number. 401 E.g.: `myobj.celldata["arrayname"]` 402 403 Usage: 404 405 `myobj.celldata.keys()` to return the available data array names 406 407 `myobj.celldata.select(name)` to make this array the active one 408 409 `myobj.celldata.remove(name)` to remove this array 410 """ 411 return DataArrayHelper(self, 1)
Create and/or return a numpy.array
associated to cells (faces).
A data array can be indexed either as a string or by an integer number.
E.g.: myobj.celldata["arrayname"]
Usage:
myobj.celldata.keys()
to return the available data array names
myobj.celldata.select(name)
to make this array the active one
myobj.celldata.remove(name)
to remove this array
413 @property 414 def metadata(self): 415 """ 416 Create and/or return a `numpy.array` associated to neither cells nor faces. 417 A data array can be indexed either as a string or by an integer number. 418 E.g.: `myobj.metadata["arrayname"]` 419 420 Usage: 421 422 `myobj.metadata.keys()` to return the available data array names 423 424 `myobj.metadata.select(name)` to make this array the active one 425 426 `myobj.metadata.remove(name)` to remove this array 427 """ 428 return DataArrayHelper(self, 2)
Create and/or return a numpy.array
associated to neither cells nor faces.
A data array can be indexed either as a string or by an integer number.
E.g.: myobj.metadata["arrayname"]
Usage:
myobj.metadata.keys()
to return the available data array names
myobj.metadata.select(name)
to make this array the active one
myobj.metadata.remove(name)
to remove this array
430 def memory_address(self) -> int: 431 """ 432 Return a unique memory address integer which may serve as the ID of the 433 object, or passed to c++ code. 434 """ 435 # https://www.linkedin.com/pulse/speedup-your-code-accessing-python-vtk-objects-from-c-pletzer/ 436 # https://github.com/tfmoraes/polydata_connectivity 437 return int(self.dataset.GetAddressAsString("")[5:], 16)
Return a unique memory address integer which may serve as the ID of the object, or passed to c++ code.
    def memory_size(self) -> int:
        """
        Return the size of the object in memory.

        NOTE(review): the historical docstring said "bytes", but VTK's
        `GetActualMemorySize()` reports the size in kibibytes (1024-byte
        units) — confirm against the VTK version in use.
        """
        return self.dataset.GetActualMemorySize()
Return the size of the object in memory, as reported by VTK (note: `GetActualMemorySize` reports kibibytes, not bytes).
443 def modified(self) -> Self: 444 """Use in conjunction with `tonumpy()` to update any modifications to the image array.""" 445 self.dataset.GetPointData().Modified() 446 scals = self.dataset.GetPointData().GetScalars() 447 if scals: 448 scals.Modified() 449 return self
Use in conjunction with tonumpy()
to update any modifications to the image array.
def box(self, scale=1, padding=0) -> "vedo.Mesh":
    """
    Return the bounding box as a new `Mesh` object.

    Arguments:
        scale : (float)
            box size can be scaled by a factor
        padding : (float, list)
            a constant padding can be added (can be a list `[padx,pady,padz]`)
    """
    b = self.bounds()
    if not utils.is_sequence(padding):
        # scalar padding applies uniformly along x, y and z
        padding = [padding, padding, padding]
    length, width, height = b[1] - b[0], b[3] - b[2], b[5] - b[4]
    tol = (length + width + height) / 30000  # useful for boxing text
    pos = [(b[0] + b[1]) / 2, (b[3] + b[2]) / 2, (b[5] + b[4]) / 2 - tol]
    bx = vedo.shapes.Box(
        pos,
        length * scale + padding[0],
        width * scale + padding[1],
        height * scale + padding[2],
        c="gray",
    )
    # Copy this object's visual properties onto the box when available
    # (objects without an actor/properties simply skip this step).
    try:
        pr = vtki.vtkProperty()
        pr.DeepCopy(self.properties)
        bx.actor.SetProperty(pr)
        bx.properties = pr
    except (AttributeError, TypeError):
        pass
    bx.flat().lighting("off").wireframe(True)
    return bx
Return the bounding box as a new Mesh
object.
Arguments:
- scale : (float) box size can be scaled by a factor
- padding : (float, list)
a constant padding can be added (can be a list
[padx,pady,padz]
)
def update_dataset(self, dataset, **kwargs) -> Self:
    """Update the dataset of the object with the provided VTK dataset."""
    # Thin fluent wrapper: kwargs are forwarded verbatim to the internal
    # updater (e.g. reset_locators=... as used by other methods).
    self._update(dataset, **kwargs)
    return self
Update the dataset of the object with the provided VTK dataset.
def bounds(self) -> np.ndarray:
    """
    Get the object bounds.
    Returns a list in format `[xmin,xmax, ymin,ymax, zmin,zmax]`.
    """
    try:
        # Scanning every vertex is very slow for large meshes, but gives
        # bounds in the object's current coordinates.
        coords = self.vertices
        xlo, ylo, zlo = np.min(coords, axis=0)
        xhi, yhi, zhi = np.max(coords, axis=0)
        return np.array([xlo, xhi, ylo, yhi, zlo, zhi])
    except (AttributeError, ValueError):
        # Fall back to the bounds reported by the VTK dataset itself.
        return np.array(self.dataset.GetBounds())
Get the object bounds.
Returns a list in format [xmin,xmax, ymin,ymax, zmin,zmax]
.
def xbounds(self, i=None) -> np.ndarray:
    """Get the bounds `[xmin,xmax]`. Can specify upper or lower with i (0,1)."""
    full_bounds = self.bounds()
    if i is None:
        return np.array([full_bounds[0], full_bounds[1]])
    # i=0 -> xmin, i=1 -> xmax
    return full_bounds[i]
Get the bounds [xmin,xmax]
. Can specify upper or lower with i (0,1).
509 def ybounds(self, i=None) -> np.ndarray: 510 """Get the bounds `[ymin,ymax]`. Can specify upper or lower with i (0,1).""" 511 b = self.bounds() 512 if i == 0: 513 return b[2] 514 if i == 1: 515 return b[3] 516 return np.array([b[2], b[3]])
Get the bounds [ymin,ymax]
. Can specify upper or lower with i (0,1).
518 def zbounds(self, i=None) -> np.ndarray: 519 """Get the bounds `[zmin,zmax]`. Can specify upper or lower with i (0,1).""" 520 b = self.bounds() 521 if i == 0: 522 return b[4] 523 if i == 1: 524 return b[5] 525 return np.array([b[4], b[5]])
Get the bounds [zmin,zmax]
. Can specify upper or lower with i (0,1).
def diagonal_size(self) -> float:
    """Get the length of the diagonal of the bounding box."""
    b = self.bounds()
    # Euclidean norm of the box extents along x, y and z.
    extents = np.array([b[1] - b[0], b[3] - b[2], b[5] - b[4]])
    return np.linalg.norm(extents)
Get the length of the diagonal of the bounding box.
def average_size(self) -> float:
    """
    Calculate and return the average size of the object.
    This is the mean of the vertex distances from the center of mass.
    """
    coords = self.vertices
    # Guard against empty objects BEFORE calling np.mean: computing the
    # mean of a zero-length axis emits a RuntimeWarning and yields NaN.
    if coords.shape[0] == 0:
        return 0.0
    cm = np.mean(coords, axis=0)  # center of mass
    cc = coords - cm
    return np.mean(np.linalg.norm(cc, axis=1))
Calculate and return the average size of the object. This is the mean of the vertex distances from the center of mass.
def center_of_mass(self) -> np.ndarray:
    """Get the center of mass of the object."""
    if isinstance(self, (vedo.RectilinearGrid, vedo.Volume)):
        # Grids/volumes: use the geometric dataset center directly
        # (the vtkCenterOfMass filter operates on point sets).
        return np.array(self.dataset.GetCenter())
    cmf = vtki.new("CenterOfMass")
    cmf.SetInputData(self.dataset)
    cmf.Update()
    c = cmf.GetCenter()
    return np.array(c)
Get the center of mass of the object.
def copy_data_from(self, obj: Any) -> Self:
    """Copy all data (point and cell data) from this input object"""
    self.dataset.GetPointData().PassData(obj.dataset.GetPointData())
    self.dataset.GetCellData().PassData(obj.dataset.GetCellData())
    # Record the operation in the provenance pipeline graph.
    self.pipeline = utils.OperationNode(
        "copy_data_from",
        parents=[self, obj],
        comment=f"{obj.__class__.__name__}",
        shape="note",
        c="#ccc5b9",
    )
    return self
Copy all data (point and cell data) from this input object
def inputdata(self):
    """Obsolete, use `.dataset` instead."""
    # Deprecated accessor kept for backward compatibility; warns and
    # returns the underlying VTK dataset.
    colors.printc("WARNING: 'inputdata()' is obsolete, use '.dataset' instead.", c="y")
    return self.dataset
Obsolete, use .dataset
instead.
@property
def npoints(self):
    """Retrieve the number of points (or vertices)."""
    return self.dataset.GetNumberOfPoints()
Retrieve the number of points (or vertices).
@property
def nvertices(self):
    """Retrieve the number of vertices (or points)."""
    # Alias of `npoints`.
    return self.dataset.GetNumberOfPoints()
Retrieve the number of vertices (or points).
@property
def ncells(self):
    """Retrieve the number of cells."""
    return self.dataset.GetNumberOfCells()
Retrieve the number of cells.
def points(self, pts=None):
    """Obsolete, use `self.vertices` or `self.coordinates` instead."""
    if pts is None:  ### getter

        # Print the deprecation warning only once per session
        # (the message is blanked after the first use).
        if warnings["points_getter"]:
            colors.printc(warnings["points_getter"], c="y")
            warnings["points_getter"] = ""
        return self.vertices

    else:  ### setter

        if warnings["points_setter"]:
            colors.printc(warnings["points_setter"], c="y")
            warnings["points_setter"] = ""

        pts = np.asarray(pts, dtype=np.float32)

        if pts.ndim == 1:
            ### getter by point index ###################
            # a 1D input is interpreted as a list of point indices
            indices = pts.astype(int)
            vpts = self.dataset.GetPoints()
            arr = utils.vtk2numpy(vpts.GetData())
            return arr[indices]  ###########

        ### setter ####################################
        if pts.shape[1] == 2:
            # lift 2D coordinates to 3D by appending a zero z-column
            pts = np.c_[pts, np.zeros(pts.shape[0], dtype=np.float32)]
        arr = utils.numpy2vtk(pts, dtype=np.float32)

        vpts = self.dataset.GetPoints()
        vpts.SetData(arr)
        vpts.Modified()
        # reset mesh to identity matrix position/rotation:
        self.point_locator = None
        self.cell_locator = None
        self.line_locator = None
        self.transform = LinearTransform()
        return self
Obsolete, use self.vertices
or self.coordinates
instead.
@property
def cell_centers(self):
    """
    Get the coordinates of the cell centers.

    Examples:
        - [delaunay2d.py](https://github.com/marcomusy/vedo/tree/master/examples/basic/delaunay2d.py)

    See also: `CellCenters()`.
    """
    vcen = vtki.new("CellCenters")
    vcen.CopyArraysOff()  # centers only, do not carry data arrays over
    vcen.SetInputData(self.dataset)
    vcen.Update()
    return utils.vtk2numpy(vcen.GetOutput().GetPoints().GetData())
@property
def lines(self):
    """
    Get lines connectivity ids as a python array
    formatted as `[[id0,id1], [id3,id4], ...]`

    See also: `lines_as_flat_array()`.
    """
    # Cell connectivity ids come as a flat 1D array in the vtk format:
    # [nids1, id0 ... idn, nids2, id0 ... idm, etc].
    try:
        arr1d = utils.vtk2numpy(self.dataset.GetLines().GetData())
    except AttributeError:
        return np.array([])
    conn = []
    i = 0
    n = len(arr1d)
    while i < n:
        npts = arr1d[i]
        conn.append([arr1d[i + 1 + k] for k in range(npts)])
        i += npts + 1
    return conn  # cannot always make a numpy array of it!
Get lines connectivity ids as a python array
formatted as [[id0,id1], [id3,id4], ...]
See also: lines_as_flat_array()
.
@property
def lines_as_flat_array(self):
    """
    Get lines connectivity ids as a 1D numpy array.
    Format is e.g. [2, 10,20, 3, 10,11,12, 2, 70,80, ...]

    See also: `lines()`.
    """
    try:
        return utils.vtk2numpy(self.dataset.GetLines().GetData())
    except AttributeError:
        # dataset has no GetLines() (e.g. not a polydata)
        return np.array([])
Get lines connectivity ids as a 1D numpy array. Format is e.g. [2, 10,20, 3, 10,11,12, 2, 70,80, ...]
See also: lines()
.
def mark_boundaries(self) -> Self:
    """
    Mark cells and vertices if they lie on a boundary.
    A new array called `BoundaryCells` is added to the object.
    """
    mb = vtki.new("MarkBoundaryFilter")
    mb.SetInputData(self.dataset)
    mb.Update()
    # overwrite the current dataset in place with the marked copy
    self.dataset.DeepCopy(mb.GetOutput())
    self.pipeline = utils.OperationNode("mark_boundaries", parents=[self])
    return self
Mark cells and vertices if they lie on a boundary.
A new array called BoundaryCells
is added to the object.
def find_cells_in_bounds(self, xbounds=(), ybounds=(), zbounds=()) -> np.ndarray:
    """
    Find cells that are within the specified bounds.

    Arguments:
        xbounds : (list, object)
            either `[xmin,xmax]`, a full 6-value bounds list, or any
            object exposing a `bounds()` method
        ybounds : (list)
            `[ymin,ymax]`
        zbounds : (list)
            `[zmin,zmax]`
    """
    # xbounds may be a vedo object: use its bounds() if available
    try:
        xbounds = list(xbounds.bounds())
    except AttributeError:
        pass

    if len(xbounds) == 6:
        bnds = xbounds
    else:
        # start from this object's own bounds and override per axis
        bnds = list(self.bounds())
        if len(xbounds) == 2:
            bnds[0] = xbounds[0]
            bnds[1] = xbounds[1]
        if len(ybounds) == 2:
            bnds[2] = ybounds[0]
            bnds[3] = ybounds[1]
        if len(zbounds) == 2:
            bnds[4] = zbounds[0]
            bnds[5] = zbounds[1]

    cell_ids = vtki.vtkIdList()
    # build the cell locator lazily and cache it on the object
    if not self.cell_locator:
        self.cell_locator = vtki.new("CellTreeLocator")
        self.cell_locator.SetDataSet(self.dataset)
        self.cell_locator.BuildLocator()
    self.cell_locator.FindCellsWithinBounds(bnds, cell_ids)
    cids = []
    for i in range(cell_ids.GetNumberOfIds()):
        cid = cell_ids.GetId(i)
        cids.append(cid)
    return np.array(cids)
Find cells that are within the specified bounds.
def find_cells_along_line(self, p0, p1, tol=0.001) -> np.ndarray:
    """
    Find cells that are intersected by a line segment.
    """
    id_list = vtki.vtkIdList()
    # build the cell locator lazily and cache it on the object
    if not self.cell_locator:
        locator = vtki.new("CellTreeLocator")
        locator.SetDataSet(self.dataset)
        locator.BuildLocator()
        self.cell_locator = locator
    self.cell_locator.FindCellsAlongLine(p0, p1, tol, id_list)
    found = [id_list.GetId(i) for i in range(id_list.GetNumberOfIds())]
    return np.array(found)
Find cells that are intersected by a line segment.
def find_cells_along_plane(self, origin, normal, tol=0.001) -> np.ndarray:
    """
    Find cells that are intersected by a plane.
    """
    id_list = vtki.vtkIdList()
    # build the cell locator lazily and cache it on the object
    if not self.cell_locator:
        locator = vtki.new("CellTreeLocator")
        locator.SetDataSet(self.dataset)
        locator.BuildLocator()
        self.cell_locator = locator
    self.cell_locator.FindCellsAlongPlane(origin, normal, tol, id_list)
    found = [id_list.GetId(i) for i in range(id_list.GetNumberOfIds())]
    return np.array(found)
Find cells that are intersected by a plane.
def keep_cell_types(self, types=()) -> Self:
    """
    Extract cells of a specific type.

    Check the VTK cell types here:
    https://vtk.org/doc/nightly/html/vtkCellType_8h.html
    """
    fe = vtki.new("ExtractCellsByType")
    fe.SetInputData(self.dataset)
    for t in types:
        # each entry may be an integer VTK cell type id or a type name
        try:
            if utils.is_integer(t):
                it = t
            else:
                it = vtki.cell_types[t.upper()]
        except KeyError:
            vedo.logger.error(f"Cell type '{t}' not recognized")
            continue
        fe.AddCellType(it)
    fe.Update()
    self._update(fe.GetOutput())
    return self
Extract cells of a specific type.
Check the VTK cell types here: https://vtk.org/doc/nightly/html/vtkCellType_8h.html
def map_cells_to_points(self, arrays=(), move=False) -> Self:
    """
    Interpolate cell data (i.e., data specified per cell or face)
    into point data (i.e., data specified at each vertex).
    The method of transformation is based on averaging the data values
    of all cells using a particular point.

    A custom list of arrays to be mapped can be passed in input.

    Set `move=True` to delete the original `celldata` array.
    """
    c2p = vtki.new("CellDataToPointData")
    c2p.SetInputData(self.dataset)
    if not move:
        # keep the original cell arrays alongside the new point arrays
        c2p.PassCellDataOn()
    if arrays:
        # map only the requested arrays
        c2p.ClearCellDataArrays()
        c2p.ProcessAllArraysOff()
        for arr in arrays:
            c2p.AddCellDataArray(arr)
    else:
        c2p.ProcessAllArraysOn()
    c2p.Update()
    self._update(c2p.GetOutput(), reset_locators=False)
    self.mapper.SetScalarModeToUsePointData()
    self.pipeline = utils.OperationNode("map_cells_to_points", parents=[self])
    return self
Interpolate cell data (i.e., data specified per cell or face) into point data (i.e., data specified at each vertex). The method of transformation is based on averaging the data values of all cells using a particular point.
A custom list of arrays to be mapped can be passed in input.
Set move=True
to delete the original celldata
array.
@property
def vertices(self):
    """Return the vertices (points) coordinates."""
    try:
        # for polydata and unstructured grid
        varr = self.dataset.GetPoints().GetData()
    except (AttributeError, TypeError):
        try:
            # for RectilinearGrid, StructuredGrid
            vpts = vtki.vtkPoints()
            self.dataset.GetPoints(vpts)
            varr = vpts.GetData()
        except (AttributeError, TypeError):
            try:
                # for ImageData
                v2p = vtki.new("ImageToPoints")
                v2p.SetInputData(self.dataset)
                v2p.Update()
                varr = v2p.GetOutput().GetPoints().GetData()
            except AttributeError:
                # dataset type exposes no points at all
                return np.array([])

    return utils.vtk2numpy(varr)
Return the vertices (points) coordinates.
@property
def coordinates(self):
    """Return the vertices (points) coordinates. Same as `vertices`."""
    return self.vertices
Return the vertices (points) coordinates. Same as vertices
.
@property
def cells_as_flat_array(self):
    """
    Get cell connectivity ids as a 1D numpy array.
    Format is e.g. [3, 10,20,30  4, 10,11,12,13  ...]
    """
    try:
        # valid for unstructured grid
        arr1d = utils.vtk2numpy(self.dataset.GetCells().GetData())
    except AttributeError:
        # valid for polydata
        arr1d = utils.vtk2numpy(self.dataset.GetPolys().GetData())
    return arr1d
Get cell connectivity ids as a 1D numpy array. Format is e.g. [3, 10,20,30 4, 10,11,12,13 ...]
@property
def cells(self):
    """
    Get the cells connectivity ids as a numpy array.

    The output format is: `[[id0 ... idn], [id0 ... idm], etc]`.
    """
    try:
        # valid for unstructured grid
        arr1d = utils.vtk2numpy(self.dataset.GetCells().GetData())
    except AttributeError:
        try:
            # valid for polydata
            arr1d = utils.vtk2numpy(self.dataset.GetPolys().GetData())
        except AttributeError:
            vedo.logger.warning(f"Cannot get cells for {type(self)}")
            return np.array([])

    # Get cell connettivity ids as a 1D array. vtk format is:
    # [nids1, id0 ... idn, niids2, id0 ... idm, etc].
    i = 0
    conn = []
    n = len(arr1d)
    if n:
        while True:
            # arr1d[i] is the number of point ids in the current cell
            cell = [arr1d[i + k] for k in range(1, arr1d[i] + 1)]
            conn.append(cell)
            i += arr1d[i] + 1
            if i >= n:
                break
    return conn
Get the cells connectivity ids as a numpy array.
The output format is: [[id0 ... idn], [id0 ... idm], etc]
.
def map_points_to_cells(self, arrays=(), move=False) -> Self:
    """
    Interpolate point data (i.e., data specified per point or vertex)
    into cell data (i.e., data specified per cell).
    The method of transformation is based on averaging the data values
    of all points defining a particular cell.

    A custom list of arrays to be mapped can be passed in input.

    Set `move=True` to delete the original `pointdata` array.

    Examples:
        - [mesh_map2cell.py](https://github.com/marcomusy/vedo/tree/master/examples/basic/mesh_map2cell.py)
    """
    p2c = vtki.new("PointDataToCellData")
    p2c.SetInputData(self.dataset)
    if not move:
        # keep the original point arrays alongside the new cell arrays
        p2c.PassPointDataOn()
    if arrays:
        # map only the requested arrays
        p2c.ClearPointDataArrays()
        p2c.ProcessAllArraysOff()
        for arr in arrays:
            p2c.AddPointDataArray(arr)
    else:
        p2c.ProcessAllArraysOn()
    p2c.Update()
    self._update(p2c.GetOutput(), reset_locators=False)
    self.mapper.SetScalarModeToUseCellData()
    self.pipeline = utils.OperationNode("map_points_to_cells", parents=[self])
    return self
Interpolate point data (i.e., data specified per point or vertex) into cell data (i.e., data specified per cell). The method of transformation is based on averaging the data values of all points defining a particular cell.
A custom list of arrays to be mapped can be passed in input.
Set move=True
to delete the original pointdata
array.
Examples:
def resample_data_from(self, source, tol=None, categorical=False) -> Self:
    """
    Resample point and cell data from another dataset.
    The output has the same structure but its point data have
    the resampled values from target.

    Use `tol` to set the tolerance used to compute whether
    a point in the source is in a cell of the current object.
    Points without resampled values, and their cells, are marked as blank.
    If the data is categorical, then the resulting data will be determined
    by a nearest neighbor interpolation scheme.

    Example:
    ```python
    from vedo import *
    m1 = Mesh(dataurl+'bunny.obj')#.add_gaussian_noise(0.1)
    pts = m1.vertices
    ces = m1.cell_centers
    m1.pointdata["xvalues"] = np.power(pts[:,0], 3)
    m1.celldata["yvalues"] = np.power(ces[:,1], 3)
    m2 = Mesh(dataurl+'bunny.obj')
    m2.resample_data_from(m1)
    # print(m2.pointdata["xvalues"])
    show(m1, m2 , N=2, axes=1)
    ```
    """
    rs = vtki.new("ResampleWithDataSet")
    rs.SetInputData(self.dataset)
    rs.SetSourceData(source.dataset)

    rs.SetPassPointArrays(True)
    rs.SetPassCellArrays(True)
    rs.SetPassFieldArrays(True)
    rs.SetCategoricalData(categorical)

    # tolerance is auto-computed unless explicitly provided
    rs.SetComputeTolerance(True)
    if tol:
        rs.SetComputeTolerance(False)
        rs.SetTolerance(tol)
    rs.Update()
    self._update(rs.GetOutput(), reset_locators=False)
    self.pipeline = utils.OperationNode(
        "resample_data_from",
        comment=f"{source.__class__.__name__}",
        parents=[self, source],
    )
    return self
Resample point and cell data from another dataset. The output has the same structure but its point data have the resampled values from target.
Use tol
to set the tolerance used to compute whether
a point in the source is in a cell of the current object.
Points without resampled values, and their cells, are marked as blank.
If the data is categorical, then the resulting data will be determined
by a nearest neighbor interpolation scheme.
Example:
from vedo import *
m1 = Mesh(dataurl+'bunny.obj')#.add_gaussian_noise(0.1)
pts = m1.vertices
ces = m1.cell_centers
m1.pointdata["xvalues"] = np.power(pts[:,0], 3)
m1.celldata["yvalues"] = np.power(ces[:,1], 3)
m2 = Mesh(dataurl+'bunny.obj')
m2.resample_data_from(m1)
# print(m2.pointdata["xvalues"])
show(m1, m2 , N=2, axes=1)
def interpolate_data_from(
    self,
    source,
    radius=None,
    n=None,
    kernel="shepard",
    exclude=("Normals",),
    on="points",
    null_strategy=1,
    null_value=0,
) -> Self:
    """
    Interpolate over source to port its data onto the current object using various kernels.

    If n (number of closest points to use) is set then radius value is ignored.

    Check out also:
        `probe()` which in many cases can be faster.

    Arguments:
        kernel : (str)
            available kernels are [shepard, gaussian, linear]
        null_strategy : (int)
            specify a strategy to use when encountering a "null" point
            during the interpolation process. Null points occur when the local neighborhood
            (of nearby points to interpolate from) is empty.

            - Case 0: an output array is created that marks points
              as being valid (=1) or null (invalid =0), and the null_value is set as well
            - Case 1: the output data value(s) are set to the provided null_value
            - Case 2: simply use the closest point to perform the interpolation.
        null_value : (float)
            see above.

    Examples:
        - [interpolate_scalar1.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/interpolate_scalar1.py)
        - [interpolate_scalar3.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/interpolate_scalar3.py)
        - [interpolate_scalar4.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/interpolate_scalar4.py)
        - [image_probe.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/image_probe.py)

    ![](https://vedo.embl.es/images/advanced/interpolateMeshArray.png)
    """
    if radius is None and not n:
        vedo.logger.error("in interpolate_data_from(): please set either radius or n")
        raise RuntimeError

    if on == "points":
        points = source.dataset
    elif on == "cells":
        # convert source cell data to point data before interpolating
        c2p = vtki.new("CellDataToPointData")
        c2p.SetInputData(source.dataset)
        c2p.Update()
        points = c2p.GetOutput()
    else:
        vedo.logger.error("in interpolate_data_from(), on must be on points or cells")
        raise RuntimeError()

    locator = vtki.new("PointLocator")
    locator.SetDataSet(points)
    locator.BuildLocator()

    # choose the interpolation kernel
    if kernel.lower() == "shepard":
        kern = vtki.new("ShepardKernel")
        kern.SetPowerParameter(2)
    elif kernel.lower() == "gaussian":
        kern = vtki.new("GaussianKernel")
        kern.SetSharpness(2)
    elif kernel.lower() == "linear":
        kern = vtki.new("LinearKernel")
    else:
        vedo.logger.error("available kernels are: [shepard, gaussian, linear]")
        raise RuntimeError()

    # footprint: n closest points takes precedence over radius
    if n:
        kern.SetNumberOfPoints(n)
        kern.SetKernelFootprintToNClosest()
    else:
        kern.SetRadius(radius)
        kern.SetKernelFootprintToRadius()

    interpolator = vtki.new("PointInterpolator")
    interpolator.SetInputData(self.dataset)
    interpolator.SetSourceData(points)
    interpolator.SetKernel(kern)
    interpolator.SetLocator(locator)
    interpolator.PassFieldArraysOn()
    interpolator.SetNullPointsStrategy(null_strategy)
    interpolator.SetNullValue(null_value)
    interpolator.SetValidPointsMaskArrayName("ValidPointMask")
    for ex in exclude:
        interpolator.AddExcludedArray(ex)
    interpolator.Update()

    if on == "cells":
        # move the interpolated point data back onto the cells
        p2c = vtki.new("PointDataToCellData")
        p2c.SetInputData(interpolator.GetOutput())
        p2c.Update()
        cpoly = p2c.GetOutput()
    else:
        cpoly = interpolator.GetOutput()

    self._update(cpoly, reset_locators=False)

    self.pipeline = utils.OperationNode("interpolate_data_from", parents=[self, source])
    return self
Interpolate over source to port its data onto the current object using various kernels.
If n (number of closest points to use) is set then radius value is ignored.
Check out also:
probe()
which in many cases can be faster.
Arguments:
- kernel : (str) available kernels are [shepard, gaussian, linear]
null_strategy : (int) specify a strategy to use when encountering a "null" point during the interpolation process. Null points occur when the local neighborhood (of nearby points to interpolate from) is empty.
- Case 0: an output array is created that marks points as being valid (=1) or null (invalid =0), and the null_value is set as well
- Case 1: the output data value(s) are set to the provided null_value
- Case 2: simply use the closest point to perform the interpolation.
- null_value : (float) see above.
Examples:
def add_ids(self) -> Self:
    """
    Generate point and cell ids arrays.

    Two new arrays are added to the mesh: `PointID` and `CellID`.
    """
    ids = vtki.new("IdFilter")
    ids.SetInputData(self.dataset)
    ids.PointIdsOn()
    ids.CellIdsOn()
    ids.FieldDataOff()  # do not generate field data ids
    ids.SetPointIdsArrayName("PointID")
    ids.SetCellIdsArrayName("CellID")
    ids.Update()
    self._update(ids.GetOutput(), reset_locators=False)
    self.pipeline = utils.OperationNode("add_ids", parents=[self])
    return self
Generate point and cell ids arrays.
Two new arrays are added to the mesh: PointID
and CellID
.
def gradient(self, input_array=None, on="points", fast=False) -> np.ndarray:
    """
    Compute and return the gradient of the active scalar field as a numpy array.

    Arguments:
        input_array : (str)
            array of the scalars to compute the gradient,
            if None the current active array is selected
        on : (str)
            compute either on 'points' or 'cells' data
        fast : (bool)
            if True, will use a less accurate algorithm
            that performs fewer derivative calculations (and is therefore faster).

    Examples:
        - [isolines.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/isolines.py)

    ![](https://user-images.githubusercontent.com/32848391/72433087-f00a8780-3798-11ea-9778-991f0abeca70.png)
    """
    gra = vtki.new("GradientFilter")
    if on.startswith("p"):
        varr = self.dataset.GetPointData()
        tp = vtki.vtkDataObject.FIELD_ASSOCIATION_POINTS
    elif on.startswith("c"):
        varr = self.dataset.GetCellData()
        tp = vtki.vtkDataObject.FIELD_ASSOCIATION_CELLS
    else:
        vedo.logger.error(f"in gradient: unknown option {on}")
        raise RuntimeError

    # default to the currently active scalars array
    if input_array is None:
        if varr.GetScalars():
            input_array = varr.GetScalars().GetName()
        else:
            vedo.logger.error(f"in gradient: no scalars found for {on}")
            raise RuntimeError

    gra.SetInputData(self.dataset)
    gra.SetInputScalars(tp, input_array)
    gra.SetResultArrayName("Gradient")
    gra.SetFasterApproximation(fast)
    gra.ComputeDivergenceOff()
    gra.ComputeVorticityOff()
    gra.ComputeGradientOn()
    gra.Update()
    if on.startswith("p"):
        gvecs = utils.vtk2numpy(gra.GetOutput().GetPointData().GetArray("Gradient"))
    else:
        gvecs = utils.vtk2numpy(gra.GetOutput().GetCellData().GetArray("Gradient"))
    return gvecs
Compute and return the gradient of the active scalar field as a numpy array.
Arguments:
- input_array : (str) array of the scalars to compute the gradient, if None the current active array is selected
- on : (str) compute either on 'points' or 'cells' data
- fast : (bool) if True, will use a less accurate algorithm that performs fewer derivative calculations (and is therefore faster).
Examples:
def divergence(self, array_name=None, on="points", fast=False) -> np.ndarray:
    """
    Compute and return the divergence of a vector field as a numpy array.

    Arguments:
        array_name : (str)
            name of the array of vectors to compute the divergence,
            if None the current active array is selected
        on : (str)
            compute either on 'points' or 'cells' data
        fast : (bool)
            if True, will use a less accurate algorithm
            that performs fewer derivative calculations (and is therefore faster).
    """
    div = vtki.new("GradientFilter")
    if on.startswith("p"):
        varr = self.dataset.GetPointData()
        tp = vtki.vtkDataObject.FIELD_ASSOCIATION_POINTS
    elif on.startswith("c"):
        varr = self.dataset.GetCellData()
        tp = vtki.vtkDataObject.FIELD_ASSOCIATION_CELLS
    else:
        vedo.logger.error(f"in divergence(): unknown option {on}")
        raise RuntimeError

    # default to the currently active vectors array
    if array_name is None:
        if varr.GetVectors():
            array_name = varr.GetVectors().GetName()
        else:
            vedo.logger.error(f"in divergence(): no vectors found for {on}")
            raise RuntimeError

    div.SetInputData(self.dataset)
    div.SetInputScalars(tp, array_name)
    div.ComputeDivergenceOn()
    div.ComputeGradientOff()
    div.ComputeVorticityOff()
    div.SetDivergenceArrayName("Divergence")
    div.SetFasterApproximation(fast)
    div.Update()
    if on.startswith("p"):
        dvecs = utils.vtk2numpy(div.GetOutput().GetPointData().GetArray("Divergence"))
    else:
        dvecs = utils.vtk2numpy(div.GetOutput().GetCellData().GetArray("Divergence"))
    return dvecs
Compute and return the divergence of a vector field as a numpy array.
Arguments:
- array_name : (str) name of the array of vectors to compute the divergence, if None the current active array is selected
- on : (str) compute either on 'points' or 'cells' data
- fast : (bool) if True, will use a less accurate algorithm that performs fewer derivative calculations (and is therefore faster).
def vorticity(self, array_name=None, on="points", fast=False) -> np.ndarray:
    """
    Compute and return the vorticity of a vector field as a numpy array.

    Arguments:
        array_name : (str)
            name of the array to compute the vorticity,
            if None the current active array is selected
        on : (str)
            compute either on 'points' or 'cells' data
        fast : (bool)
            if True, will use a less accurate algorithm
            that performs fewer derivative calculations (and is therefore faster).
    """
    vort = vtki.new("GradientFilter")
    if on.startswith("p"):
        varr = self.dataset.GetPointData()
        tp = vtki.vtkDataObject.FIELD_ASSOCIATION_POINTS
    elif on.startswith("c"):
        varr = self.dataset.GetCellData()
        tp = vtki.vtkDataObject.FIELD_ASSOCIATION_CELLS
    else:
        vedo.logger.error(f"in vorticity(): unknown option {on}")
        raise RuntimeError

    # default to the currently active vectors array
    if array_name is None:
        if varr.GetVectors():
            array_name = varr.GetVectors().GetName()
        else:
            vedo.logger.error(f"in vorticity(): no vectors found for {on}")
            raise RuntimeError

    vort.SetInputData(self.dataset)
    vort.SetInputScalars(tp, array_name)
    vort.ComputeDivergenceOff()
    vort.ComputeGradientOff()
    vort.ComputeVorticityOn()
    vort.SetVorticityArrayName("Vorticity")
    vort.SetFasterApproximation(fast)
    vort.Update()
    if on.startswith("p"):
        vvecs = utils.vtk2numpy(vort.GetOutput().GetPointData().GetArray("Vorticity"))
    else:
        vvecs = utils.vtk2numpy(vort.GetOutput().GetCellData().GetArray("Vorticity"))
    return vvecs
Compute and return the vorticity of a vector field as a numpy array.
Arguments:
- array_name : (str) name of the array to compute the vorticity, if None the current active array is selected
- on : (str) compute either on 'points' or 'cells' data
- fast : (bool) if True, will use a less accurate algorithm that performs fewer derivative calculations (and is therefore faster).
def probe(self, source) -> Self:
    """
    Takes a data set and probes its scalars at the specified points in space.

    Note that a mask is also output with valid/invalid points which can be accessed
    with `mesh.pointdata['ValidPointMask']`.

    Check out also:
        `interpolate_data_from()`

    Examples:
        - [probe_points.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/probe_points.py)

    ![](https://vedo.embl.es/images/volumetric/probePoints.png)
    """
    probe_filter = vtki.new("ProbeFilter")
    probe_filter.SetSourceData(source.dataset)
    probe_filter.SetInputData(self.dataset)
    probe_filter.Update()
    self._update(probe_filter.GetOutput(), reset_locators=False)
    self.pipeline = utils.OperationNode("probe", parents=[self, source])
    # expose the VTK validity mask under a friendlier name
    self.pointdata.rename("vtkValidPointMask", "ValidPointMask")
    return self
Takes a data set and probes its scalars at the specified points in space.
Note that a mask is also output with valid/invalid points which can be accessed
with mesh.pointdata['ValidPointMask']
.
Check out also:
Examples:
def compute_cell_size(self) -> Self:
    """
    Add to this object a cell data array
    containing the area, volume and edge length
    of the cells (when applicable to the object type).

    Array names are: `Area`, `Volume`, `Length`.
    """
    csf = vtki.new("CellSizeFilter")
    csf.SetInputData(self.dataset)
    csf.SetComputeArea(1)
    csf.SetComputeVolume(1)
    csf.SetComputeLength(1)
    csf.SetComputeVertexCount(0)  # vertex counts are not of interest here
    csf.SetAreaArrayName("Area")
    csf.SetVolumeArrayName("Volume")
    csf.SetLengthArrayName("Length")
    csf.Update()
    self._update(csf.GetOutput(), reset_locators=False)
    return self
Add to this object a cell data array containing the area, volume and edge length of the cells (when applicable to the object type).
Array names are: Area
, Volume
, Length
.
1301 def generate_random_data(self) -> Self: 1302 """Fill a dataset with random attributes""" 1303 gen = vtki.new("RandomAttributeGenerator") 1304 gen.SetInputData(self.dataset) 1305 gen.GenerateAllDataOn() 1306 gen.SetDataTypeToFloat() 1307 gen.GeneratePointNormalsOff() 1308 gen.GeneratePointTensorsOn() 1309 gen.GenerateCellScalarsOn() 1310 gen.Update() 1311 self._update(gen.GetOutput(), reset_locators=False) 1312 self.pipeline = utils.OperationNode("generate_random_data", parents=[self]) 1313 return self
Fill a dataset with random attributes
def integrate_data(self) -> dict:
    """
    Integrate point and cell data arrays while computing length,
    area or volume of the domain. It works for 1D, 2D or 3D cells.

    For volumetric datasets, this filter ignores all but 3D cells.
    It will not compute the volume contained in a closed surface.

    Returns a dictionary with keys: `pointdata`, `celldata`, `metadata`,
    which contain the integration result for the corresponding attributes.

    Examples:
        ```python
        from vedo import *
        surf = Sphere(res=100)
        surf.pointdata['scalars'] = np.ones(surf.npoints)
        data = surf.integrate_data()
        print(data['pointdata']['scalars'], "is equal to 4pi", 4*np.pi)
        ```

        ```python
        from vedo import *

        xcoords1 = np.arange(0, 2.2, 0.2)
        xcoords2 = sqrt(np.arange(0, 4.2, 0.2))

        ycoords = np.arange(0, 1.2, 0.2)

        surf1 = Grid(s=(xcoords1, ycoords)).rotate_y(-45).lw(2)
        surf2 = Grid(s=(xcoords2, ycoords)).rotate_y(-45).lw(2)

        surf1.pointdata['scalars'] = surf1.vertices[:,2]
        surf2.pointdata['scalars'] = surf2.vertices[:,2]

        data1 = surf1.integrate_data()
        data2 = surf2.integrate_data()

        print(data1['pointdata']['scalars'],
            "is equal to",
            data2['pointdata']['scalars'],
            "even if the grids are different!",
            "(= the volume under the surface)"
        )
        show(surf1, surf2, N=2, axes=1).close()
        ```
    """
    vinteg = vtki.new("IntegrateAttributes")
    vinteg.SetInputData(self.dataset)
    vinteg.Update()
    # the filter outputs a one-cell unstructured grid holding the integrals
    ugrid = vedo.UnstructuredGrid(vinteg.GetOutput())
    data = dict(
        pointdata=ugrid.pointdata.todict(),
        celldata=ugrid.celldata.todict(),
        metadata=ugrid.metadata.todict(),
    )
    return data
Integrate point and cell data arrays while computing length, area or volume of the domain. It works for 1D, 2D or 3D cells.
For volumetric datasets, this filter ignores all but 3D cells. It will not compute the volume contained in a closed surface.
Returns a dictionary with keys: pointdata
, celldata
, metadata
,
which contain the integration result for the corresponding attributes.
Examples:
from vedo import * surf = Sphere(res=100) surf.pointdata['scalars'] = np.ones(surf.npoints) data = surf.integrate_data() print(data['pointdata']['scalars'], "is equal to 4pi", 4*np.pi)
from vedo import * xcoords1 = np.arange(0, 2.2, 0.2) xcoords2 = sqrt(np.arange(0, 4.2, 0.2)) ycoords = np.arange(0, 1.2, 0.2) surf1 = Grid(s=(xcoords1, ycoords)).rotate_y(-45).lw(2) surf2 = Grid(s=(xcoords2, ycoords)).rotate_y(-45).lw(2) surf1.pointdata['scalars'] = surf1.vertices[:,2] surf2.pointdata['scalars'] = surf2.vertices[:,2] data1 = surf1.integrate_data() data2 = surf2.integrate_data() print(data1['pointdata']['scalars'], "is equal to", data2['pointdata']['scalars'], "even if the grids are different!", "(= the volume under the surface)" ) show(surf1, surf2, N=2, axes=1).close()
def write(self, filename, binary=True) -> None:
    """Write object to file."""
    written = vedo.file_io.write(self, filename, binary)
    # record the operation in the pipeline graph of the written object
    node = utils.OperationNode(
        "write",
        parents=[self],
        comment=filename[:15],
        shape="folder",
        c="#8a817c",
    )
    written.pipeline = node
Write object to file.
def tomesh(self, bounds=(), shrink=0) -> "vedo.Mesh":
    """
    Extract boundary geometry from dataset (or convert data to polygonal type).

    Two new arrays are added to the mesh: `OriginalCellIds` and `OriginalPointIds`
    to keep track of the original mesh elements.

    Arguments:
        bounds : (list)
            specify a sub-region to extract
        shrink : (float)
            shrink the cells to a fraction of their original size
    """
    geo = vtki.new("GeometryFilter")

    if not shrink:
        geo.SetInputData(self.dataset)
    else:
        # shrink cells first, then extract the geometry of the result
        shrinker = vtki.new("ShrinkFilter")
        shrinker.SetInputData(self.dataset)
        shrinker.SetShrinkFactor(shrink)
        shrinker.Update()
        geo.SetInputData(shrinker.GetOutput())

    geo.SetPassThroughCellIds(1)
    geo.SetPassThroughPointIds(1)
    geo.SetOriginalCellIdsName("OriginalCellIds")
    geo.SetOriginalPointIdsName("OriginalPointIds")
    geo.SetNonlinearSubdivisionLevel(1)
    # geo.MergingOff() # crashes on StructuredGrids
    if bounds:
        geo.SetExtent(bounds)
        geo.ExtentClippingOn()
    geo.Update()

    out = vedo.mesh.Mesh(geo.GetOutput())
    out.pipeline = utils.OperationNode("tomesh", parents=[self], c="#9e2a2b")
    return out
Extract boundary geometry from dataset (or convert data to polygonal type).
Two new arrays are added to the mesh: `OriginalCellIds` and `OriginalPointIds`,
to keep track of the original mesh elements.
Arguments:
- bounds : (list) specify a sub-region to extract
- shrink : (float) shrink the cells to a fraction of their original size
def signed_distance(self, dims=(20, 20, 20), bounds=None, invert=False, max_radius=None) -> "vedo.Volume":
    """
    Compute the `Volume` object whose voxels contain the signed distance from
    the object. The calling object must have "Normals" defined.

    Arguments:
        bounds : (list, actor)
            bounding box sizes
        dims : (list)
            dimensions (nr. of voxels) of the output volume.
        invert : (bool)
            flip the sign
        max_radius : (float)
            specify how far out to propagate distance calculation

    Examples:
        - [distance2mesh.py](https://github.com/marcomusy/vedo/blob/master/examples/basic/distance2mesh.py)

        ![](https://vedo.embl.es/images/basic/distance2mesh.png)
    """
    # fall back to the object's own bounding box and half-diagonal
    if bounds is None:
        bounds = self.bounds()
    if max_radius is None:
        max_radius = self.diagonal_size() / 2

    sdf = vtki.new("SignedDistance")
    sdf.SetInputData(self.dataset)
    sdf.SetRadius(max_radius)
    sdf.SetBounds(bounds)
    sdf.SetDimensions(dims)
    sdf.Update()
    image = sdf.GetOutput()

    if invert:
        # flip the sign by multiplying every voxel by -1
        flipper = vtki.new("ImageMathematics")
        flipper.SetInput1Data(image)
        flipper.SetOperationToMultiplyByK()
        flipper.SetConstantK(-1)
        flipper.Update()
        image = flipper.GetOutput()

    vol = vedo.Volume(image)
    vol.name = "SignedDistanceVolume"
    vol.pipeline = utils.OperationNode(
        "signed_distance",
        parents=[self],
        comment=f"dims={tuple(vol.dimensions())}",
        c="#e9c46a:#0096c7",
    )
    return vol
Compute the `Volume` object whose voxels contain the signed distance from
the object. The calling object must have "Normals" defined.
Arguments:
- bounds : (list, actor) bounding box sizes
- dims : (list) dimensions (nr. of voxels) of the output volume.
- invert : (bool) flip the sign
- max_radius : (float) specify how far out to propagate distance calculation
Examples:
def unsigned_distance(
    self, dims=(25,25,25), bounds=(), max_radius=0, cap_value=0) -> "vedo.Volume":
    """
    Compute the `Volume` object whose voxels contain the unsigned distance.
    """
    udf = vtki.new("UnsignedDistance")
    udf.SetInputData(self.dataset)
    udf.SetDimensions(dims)

    # use the caller's bounds only when fully specified, else the object's own
    if len(bounds) == 6:
        udf.SetBounds(bounds)
    # elif bounds == "auto":
    #     udf.AdjustBoundsOn()
    else:
        udf.SetBounds(self.bounds())

    if not max_radius:
        # default propagation distance: a tenth of the bounding-box diagonal
        max_radius = self.diagonal_size() / 10
    udf.SetRadius(max_radius)

    if self.point_locator:
        udf.SetLocator(self.point_locator)

    if cap_value is not None:
        udf.CappingOn()
        udf.SetCapValue(cap_value)

    udf.SetOutputScalarTypeToFloat()
    udf.Update()

    vol = vedo.Volume(udf.GetOutput())
    vol.name = "UnsignedDistanceVolume"
    vol.pipeline = utils.OperationNode(
        "unsigned_distance", parents=[self], c="#e9c46a:#0096c7")
    return vol
Compute the `Volume` object whose voxels contain the unsigned distance.
def smooth_data(self,
    niter=10, relaxation_factor=0.1, strategy=0, mask=None,
    exclude=("Normals", "TextureCoordinates"),
) -> Self:
    """
    Smooth point attribute data using distance weighted Laplacian kernel.

    The effect is to blur regions of high variation and emphasize low variation regions.

    Arguments:
        niter : (int)
            number of iterations
        relaxation_factor : (float)
            relaxation factor controlling the amount of Laplacian smoothing applied
        strategy : (int)
            strategy to use for Laplacian smoothing
            - 0: use all points, all point data attributes are smoothed
            - 1: smooth all point attribute data except those on the boundary
            - 2: only point data connected to a boundary point are smoothed
        mask : (str, np.ndarray)
            array to be used as a mask (the strategy keyword is then ignored)
        exclude : (list)
            list of arrays to be excluded from smoothing
    """
    try:
        saf = vtki.new("AttributeSmoothingFilter")
    except Exception:
        # vtkAttributeSmoothingFilter was introduced in VTK 9.3.0
        vedo.logger.error("smooth_data() only available in VTK>=9.3.0")
        return self
    saf.SetInputData(self.dataset)
    saf.SetRelaxationFactor(relaxation_factor)
    saf.SetNumberOfIterations(niter)

    for ex in exclude:
        saf.AddExcludedArray(ex)

    saf.SetWeightsTypeToDistance2()

    saf.SetSmoothingStrategy(strategy)
    if mask is not None:
        # an explicit mask overrides the strategy setting
        saf.SetSmoothingStrategyToSmoothingMask()
        if isinstance(mask, str):
            mask_ = self.dataset.GetPointData().GetArray(mask)
            if not mask_:
                vedo.logger.error(f"smooth_data(): mask array {mask} not found")
                return self
            mask_array = vtki.vtkUnsignedCharArray()
            mask_array.ShallowCopy(mask_)
            mask_array.SetName(mask_.GetName())
        else:
            mask_array = utils.numpy2vtk(mask, dtype=np.uint8)
        saf.SetSmoothingMask(mask_array)

    saf.Update()

    self._update(saf.GetOutput())
    self.pipeline = utils.OperationNode(
        "smooth_data", comment=f"strategy {strategy}", parents=[self], c="#9e2a2b"
    )
    return self
Smooth point attribute data using distance weighted Laplacian kernel.
The effect is to blur regions of high variation and emphasize low variation regions.
Arguments:
- niter : (int) number of iterations
- relaxation_factor : (float) relaxation factor controlling the amount of Laplacian smoothing applied
- strategy : (int) strategy to use for Laplacian smoothing - 0: use all points, all point data attributes are smoothed - 1: smooth all point attribute data except those on the boundary - 2: only point data connected to a boundary point are smoothed
- mask : (str, np.ndarray) array to be used as a mask (the strategy keyword is then ignored)
- exclude : (list) list of arrays to be excluded from smoothing
def compute_streamlines(
    self,
    seeds: Any,
    integrator="rk4",
    direction="forward",
    initial_step_size=None,
    max_propagation=None,
    max_steps=10000,
    step_length=0,
    surface_constrained=False,
    compute_vorticity=False,
) -> Union["vedo.Lines", None]:
    """
    Integrate a vector field to generate streamlines.

    Arguments:
        seeds : (Mesh, Points, list)
            starting points of the streamlines
        integrator : (str)
            type of integration method to be used:
            - "rk2" (Runge-Kutta 2)
            - "rk4" (Runge-Kutta 4)
            - "rk45" (Runge-Kutta 45)
        direction : (str)
            direction of integration, either "forward", "backward" or "both"
        initial_step_size : (float)
            initial step size used for line integration
        max_propagation : (float)
            maximum length of a streamline expressed in absolute units
        max_steps : (int)
            maximum number of steps for a streamline
        step_length : (float)
            maximum length of a step expressed in absolute units
        surface_constrained : (bool)
            whether to stop integrating when the streamline leaves the surface
        compute_vorticity : (bool)
            whether to compute the vorticity at each streamline point
    """
    # derive sensible defaults from the average extent of the dataset
    xmin, xmax, ymin, ymax, zmin, zmax = self.dataset.GetBounds()
    size = (zmax - zmin + ymax - ymin + xmax - xmin) / 3
    if initial_step_size is None:
        initial_step_size = size / 1000.0
    if max_propagation is None:
        max_propagation = size * 2

    if utils.is_sequence(seeds):
        seeds = vedo.Points(seeds)

    tracer = vtki.new("StreamTracer")
    tracer.SetSourceData(seeds.dataset)
    if isinstance(self, vedo.RectilinearGrid):
        # the tracer does not accept rectilinear grids directly
        tracer.SetInputData(vedo.UnstructuredGrid(self.dataset).dataset)
    else:
        tracer.SetInputDataObject(self.dataset)

    tracer.SetInitialIntegrationStep(initial_step_size)
    tracer.SetComputeVorticity(compute_vorticity)
    tracer.SetMaximumNumberOfSteps(max_steps)
    tracer.SetMaximumPropagation(max_propagation)
    tracer.SetSurfaceStreamlines(surface_constrained)
    if step_length:
        tracer.SetMaximumIntegrationStep(step_length)

    # substring match, so e.g. "fwd"-like spellings "forwards" also work
    if "for" in direction:
        tracer.SetIntegrationDirectionToForward()
    elif "back" in direction:
        tracer.SetIntegrationDirectionToBackward()
    elif "both" in direction:
        tracer.SetIntegrationDirectionToBoth()
    else:
        vedo.logger.error(f"in compute_streamlines(), unknown direction {direction}")
        return None

    if integrator == "rk2":
        tracer.SetIntegratorTypeToRungeKutta2()
    elif integrator == "rk4":
        tracer.SetIntegratorTypeToRungeKutta4()
    elif integrator == "rk45":
        tracer.SetIntegratorTypeToRungeKutta45()
    else:
        vedo.logger.error(f"in compute_streamlines(), unknown integrator {integrator}")
        return None

    tracer.Update()

    stlines = vedo.shapes.Lines(tracer.GetOutput(), lw=4)
    stlines.name = "StreamLines"
    self.pipeline = utils.OperationNode(
        "compute_streamlines", comment=f"{integrator}", parents=[self, seeds], c="#9e2a2b"
    )
    return stlines
Integrate a vector field to generate streamlines.
Arguments:
- seeds : (Mesh, Points, list) starting points of the streamlines
- integrator : (str) type of integration method to be used: - "rk2" (Runge-Kutta 2) - "rk4" (Runge-Kutta 4) - "rk45" (Runge-Kutta 45)
- direction : (str) direction of integration, either "forward", "backward" or "both"
- initial_step_size : (float) initial step size used for line integration
- max_propagation : (float) maximum length of a streamline expressed in absolute units
- max_steps : (int) maximum number of steps for a streamline
- step_length : (float) maximum length of a step expressed in absolute units
- surface_constrained : (bool) whether to stop integrating when the streamline leaves the surface
- compute_vorticity : (bool) whether to compute the vorticity at each streamline point
class PointAlgorithms(CommonAlgorithms):
    """Methods for point clouds."""

    def apply_transform(self, LT: Any, concatenate=True, deep_copy=True) -> Self:
        """
        Apply a linear or non-linear transformation to the mesh polygonal data.

        Example:
        ```python
        from vedo import Cube, show, settings
        settings.use_parallel_projection = True
        c1 = Cube().rotate_z(25).pos(2,1).mirror().alpha(0.5)
        T = c1.transform # rotate by 5 degrees, place at (2,1)
        c2 = Cube().c('red4').wireframe().lw(10).lighting('off')
        c2.apply_transform(T)
        show(c1, c2, "The 2 cubes should overlap!", axes=1).close()
        ```

        ![](https://vedo.embl.es/images/feats/apply_transform.png)
        """
        # nothing to transform on an empty dataset
        if self.dataset.GetNumberOfPoints() == 0:
            return self

        if isinstance(LT, LinearTransform):
            LT_is_linear = True
            tr = LT.T
            if LT.is_identity():
                return self

        elif isinstance(LT, (vtki.vtkMatrix4x4, vtki.vtkLinearTransform)) or utils.is_sequence(LT):
            # raw matrix / vtk transform / sequence: wrap into a LinearTransform first
            LT_is_linear = True
            LT = LinearTransform(LT)
            tr = LT.T
            if LT.is_identity():
                return self

        elif isinstance(LT, NonLinearTransform):
            LT_is_linear = False
            tr = LT.T
            self.transform = LT  # reset

        elif isinstance(LT, vtki.vtkThinPlateSplineTransform):
            LT_is_linear = False
            tr = LT
            self.transform = NonLinearTransform(LT)  # reset

        else:
            vedo.logger.error(f"apply_transform(), unknown input type:\n{LT}")
            return self

        ################
        if LT_is_linear:
            if concatenate:
                try:
                    # self.transform might still not be linear
                    self.transform.concatenate(LT)
                except AttributeError:
                    # in that case reset it
                    self.transform = LinearTransform()

        ################
        # pick the vtk filter matching the concrete dataset type
        if isinstance(self.dataset, vtki.vtkPolyData):
            tp = vtki.new("TransformPolyDataFilter")
        elif isinstance(self.dataset, vtki.vtkUnstructuredGrid):
            tp = vtki.new("TransformFilter")
            tp.TransformAllInputVectorsOn()
        # elif isinstance(self.dataset, vtki.vtkImageData):
        #     tp = vtki.new("ImageReslice")
        #     tp.SetInterpolationModeToCubic()
        #     tp.SetResliceTransform(tr)
        else:
            vedo.logger.error(f"apply_transform(), unknown input type: {[self.dataset]}")
            return self

        tp.SetTransform(tr)
        tp.SetInputData(self.dataset)
        tp.Update()
        out = tp.GetOutput()

        if deep_copy:
            self.dataset.DeepCopy(out)
        else:
            self.dataset.ShallowCopy(out)

        # reset the locators
        self.point_locator = None
        self.cell_locator = None
        self.line_locator = None
        return self

    def apply_transform_from_actor(self) -> LinearTransform:
        """
        Apply the current transformation of the actor to the data.
        Useful when manually moving an actor (eg. when pressing "a").
        Returns the `LinearTransform` object.

        Note that this method is automatically called when the window is closed,
        or the interactor style is changed.
        """
        M = self.actor.GetMatrix()
        self.apply_transform(M)
        # reset the actor matrix to identity so the transform is not applied twice
        iden = vtki.vtkMatrix4x4()
        self.actor.PokeMatrix(iden)
        return LinearTransform(M)

    def pos(self, x=None, y=None, z=None) -> Self:
        """Set/Get object position."""
        if x is None:  # get functionality
            return self.transform.position

        if z is None and y is None:  # assume x is of the form (x,y,z)
            if len(x) == 3:
                x, y, z = x
            else:
                x, y = x
                z = 0
        elif z is None:  # assume x,y is of the form x, y
            z = 0

        q = self.transform.position
        delta = [x, y, z] - q
        # no-op when the target position coincides with the current one
        if delta[0] == delta[1] == delta[2] == 0:
            return self
        LT = LinearTransform().translate(delta)
        return self.apply_transform(LT)

    def shift(self, dx=0, dy=0, dz=0) -> Self:
        """Add a vector to the current object position."""
        if utils.is_sequence(dx):
            dx, dy, dz = utils.make3d(dx)
        if dx == dy == dz == 0:
            return self
        LT = LinearTransform().translate([dx, dy, dz])
        return self.apply_transform(LT)

    def x(self, val=None) -> Self:
        """Set/Get object position along x axis."""
        p = self.transform.position
        if val is None:
            return p[0]
        self.pos(val, p[1], p[2])
        return self

    def y(self, val=None) -> Self:
        """Set/Get object position along y axis."""
        p = self.transform.position
        if val is None:
            return p[1]
        self.pos(p[0], val, p[2])
        return self

    def z(self, val=None) -> Self:
        """Set/Get object position along z axis."""
        p = self.transform.position
        if val is None:
            return p[2]
        self.pos(p[0], p[1], val)
        return self

    def rotate(self, angle: float, axis=(1, 0, 0), point=(0, 0, 0), rad=False) -> Self:
        """
        Rotate around an arbitrary `axis` passing through `point`.

        Example:
        ```python
        from vedo import *
        c1 = Cube()
        c2 = c1.clone().c('violet').alpha(0.5) # copy of c1
        v = vector(0.2,1,0)
        p = vector(1,0,0)  # axis passes through this point
        c2.rotate(90, axis=v, point=p)
        l = Line(-v+p, v+p).lw(3).c('red')
        show(c1, l, c2, axes=1).close()
        ```

        ![](https://vedo.embl.es/images/feats/rotate_axis.png)
        """
        LT = LinearTransform()
        LT.rotate(angle, axis, point, rad)
        return self.apply_transform(LT)

    def rotate_x(self, angle: float, rad=False, around=None) -> Self:
        """
        Rotate around x-axis. If angle is in radians set `rad=True`.

        Use `around` to define a pivoting point.
        """
        if angle == 0:
            return self
        LT = LinearTransform().rotate_x(angle, rad, around)
        return self.apply_transform(LT)

    def rotate_y(self, angle: float, rad=False, around=None) -> Self:
        """
        Rotate around y-axis. If angle is in radians set `rad=True`.

        Use `around` to define a pivoting point.
        """
        if angle == 0:
            return self
        LT = LinearTransform().rotate_y(angle, rad, around)
        return self.apply_transform(LT)

    def rotate_z(self, angle: float, rad=False, around=None) -> Self:
        """
        Rotate around z-axis. If angle is in radians set `rad=True`.

        Use `around` to define a pivoting point.
        """
        if angle == 0:
            return self
        LT = LinearTransform().rotate_z(angle, rad, around)
        return self.apply_transform(LT)

    def reorient(self, initaxis, newaxis, rotation=0, rad=False, xyplane=False) -> Self:
        """
        Reorient the object to point to a new direction from an initial one.
        If `initaxis` is None, the object will be assumed in its "default" orientation.
        If `xyplane` is True, the object will be rotated to lie on the xy plane.

        Use `rotation` to first rotate the object around its `initaxis`.
        """
        # pivot the reorientation around the object's current position
        q = self.transform.position
        LT = LinearTransform()
        LT.reorient(initaxis, newaxis, q, rotation, rad, xyplane)
        return self.apply_transform(LT)

    def scale(self, s=None, reset=False, origin=True) -> Union[Self, np.array]:
        """
        Set/get object's scaling factor.

        Arguments:
            s : (list, float)
                scaling factor(s).
            reset : (bool)
                if True previous scaling factors are ignored.
            origin : (bool)
                if True scaling is applied with respect to object's position,
                otherwise is applied respect to (0,0,0).

        Note:
            use `s=(sx,sy,sz)` to scale differently in the three coordinates.
        """
        if s is None:
            return np.array(self.transform.T.GetScale())

        if not utils.is_sequence(s):
            s = [s, s, s]

        LT = LinearTransform()
        if reset:
            # divide out the previously accumulated scaling
            old_s = np.array(self.transform.T.GetScale())
            LT.scale(s / old_s)
        else:
            if origin is True:
                LT.scale(s, origin=self.transform.position)
            elif origin is False:
                LT.scale(s, origin=False)
            else:
                LT.scale(s, origin=origin)

        return self.apply_transform(LT)
Methods for point clouds.
def apply_transform(self, LT: Any, concatenate=True, deep_copy=True) -> Self:
    """
    Apply a linear or non-linear transformation to the mesh polygonal data.

    Example:
    ```python
    from vedo import Cube, show, settings
    settings.use_parallel_projection = True
    c1 = Cube().rotate_z(25).pos(2,1).mirror().alpha(0.5)
    T = c1.transform # rotate by 5 degrees, place at (2,1)
    c2 = Cube().c('red4').wireframe().lw(10).lighting('off')
    c2.apply_transform(T)
    show(c1, c2, "The 2 cubes should overlap!", axes=1).close()
    ```

    ![](https://vedo.embl.es/images/feats/apply_transform.png)
    """
    # nothing to transform on an empty dataset
    if self.dataset.GetNumberOfPoints() == 0:
        return self

    if isinstance(LT, LinearTransform):
        LT_is_linear = True
        tr = LT.T
        if LT.is_identity():
            return self

    elif isinstance(LT, (vtki.vtkMatrix4x4, vtki.vtkLinearTransform)) or utils.is_sequence(LT):
        # raw matrix / vtk transform / sequence: wrap into a LinearTransform first
        LT_is_linear = True
        LT = LinearTransform(LT)
        tr = LT.T
        if LT.is_identity():
            return self

    elif isinstance(LT, NonLinearTransform):
        LT_is_linear = False
        tr = LT.T
        self.transform = LT  # reset

    elif isinstance(LT, vtki.vtkThinPlateSplineTransform):
        LT_is_linear = False
        tr = LT
        self.transform = NonLinearTransform(LT)  # reset

    else:
        vedo.logger.error(f"apply_transform(), unknown input type:\n{LT}")
        return self

    ################
    if LT_is_linear:
        if concatenate:
            try:
                # self.transform might still not be linear
                self.transform.concatenate(LT)
            except AttributeError:
                # in that case reset it
                self.transform = LinearTransform()

    ################
    # pick the vtk filter matching the concrete dataset type
    if isinstance(self.dataset, vtki.vtkPolyData):
        tp = vtki.new("TransformPolyDataFilter")
    elif isinstance(self.dataset, vtki.vtkUnstructuredGrid):
        tp = vtki.new("TransformFilter")
        tp.TransformAllInputVectorsOn()
    # elif isinstance(self.dataset, vtki.vtkImageData):
    #     tp = vtki.new("ImageReslice")
    #     tp.SetInterpolationModeToCubic()
    #     tp.SetResliceTransform(tr)
    else:
        vedo.logger.error(f"apply_transform(), unknown input type: {[self.dataset]}")
        return self

    tp.SetTransform(tr)
    tp.SetInputData(self.dataset)
    tp.Update()
    out = tp.GetOutput()

    if deep_copy:
        self.dataset.DeepCopy(out)
    else:
        self.dataset.ShallowCopy(out)

    # reset the locators
    self.point_locator = None
    self.cell_locator = None
    self.line_locator = None
    return self
Apply a linear or non-linear transformation to the mesh polygonal data.
Example:
from vedo import Cube, show, settings
settings.use_parallel_projection = True
c1 = Cube().rotate_z(25).pos(2,1).mirror().alpha(0.5)
T = c1.transform # rotate by 5 degrees, place at (2,1)
c2 = Cube().c('red4').wireframe().lw(10).lighting('off')
c2.apply_transform(T)
show(c1, c2, "The 2 cubes should overlap!", axes=1).close()
def apply_transform_from_actor(self) -> LinearTransform:
    """
    Apply the current transformation of the actor to the data.
    Useful when manually moving an actor (eg. when pressing "a").
    Returns the `LinearTransform` object.

    Note that this method is automatically called when the window is closed,
    or the interactor style is changed.
    """
    matrix = self.actor.GetMatrix()
    self.apply_transform(matrix)
    # clear the actor's own matrix so the transform is not applied twice
    self.actor.PokeMatrix(vtki.vtkMatrix4x4())
    return LinearTransform(matrix)
Apply the current transformation of the actor to the data.
Useful when manually moving an actor (eg. when pressing "a").
Returns the `LinearTransform` object.
Note that this method is automatically called when the window is closed, or the interactor style is changed.
1759 def pos(self, x=None, y=None, z=None) -> Self: 1760 """Set/Get object position.""" 1761 if x is None: # get functionality 1762 return self.transform.position 1763 1764 if z is None and y is None: # assume x is of the form (x,y,z) 1765 if len(x) == 3: 1766 x, y, z = x 1767 else: 1768 x, y = x 1769 z = 0 1770 elif z is None: # assume x,y is of the form x, y 1771 z = 0 1772 1773 q = self.transform.position 1774 delta = [x, y, z] - q 1775 if delta[0] == delta[1] == delta[2] == 0: 1776 return self 1777 LT = LinearTransform().translate(delta) 1778 return self.apply_transform(LT)
Set/Get object position.
def shift(self, dx=0, dy=0, dz=0) -> Self:
    """Add a vector to the current object position."""
    if utils.is_sequence(dx):
        dx, dy, dz = utils.make3d(dx)
    # a zero displacement is a no-op
    if not (dx or dy or dz):
        return self
    translation = LinearTransform().translate([dx, dy, dz])
    return self.apply_transform(translation)
Add a vector to the current object position.
1789 def x(self, val=None) -> Self: 1790 """Set/Get object position along x axis.""" 1791 p = self.transform.position 1792 if val is None: 1793 return p[0] 1794 self.pos(val, p[1], p[2]) 1795 return self
Set/Get object position along x axis.
1797 def y(self, val=None)-> Self: 1798 """Set/Get object position along y axis.""" 1799 p = self.transform.position 1800 if val is None: 1801 return p[1] 1802 self.pos(p[0], val, p[2]) 1803 return self
Set/Get object position along y axis.
1805 def z(self, val=None) -> Self: 1806 """Set/Get object position along z axis.""" 1807 p = self.transform.position 1808 if val is None: 1809 return p[2] 1810 self.pos(p[0], p[1], val) 1811 return self
Set/Get object position along z axis.
def rotate(self, angle: float, axis=(1, 0, 0), point=(0, 0, 0), rad=False) -> Self:
    """
    Rotate around an arbitrary `axis` passing through `point`.

    Example:
    ```python
    from vedo import *
    c1 = Cube()
    c2 = c1.clone().c('violet').alpha(0.5) # copy of c1
    v = vector(0.2,1,0)
    p = vector(1,0,0)  # axis passes through this point
    c2.rotate(90, axis=v, point=p)
    l = Line(-v+p, v+p).lw(3).c('red')
    show(c1, l, c2, axes=1).close()
    ```

    ![](https://vedo.embl.es/images/feats/rotate_axis.png)
    """
    rotation = LinearTransform()
    rotation.rotate(angle, axis, point, rad)
    return self.apply_transform(rotation)
Rotate around an arbitrary `axis` passing through `point`.
Example:
from vedo import *
c1 = Cube()
c2 = c1.clone().c('violet').alpha(0.5) # copy of c1
v = vector(0.2,1,0)
p = vector(1,0,0) # axis passes through this point
c2.rotate(90, axis=v, point=p)
l = Line(-v+p, v+p).lw(3).c('red')
show(c1, l, c2, axes=1).close()
1835 def rotate_x(self, angle: float, rad=False, around=None) -> Self: 1836 """ 1837 Rotate around x-axis. If angle is in radians set `rad=True`. 1838 1839 Use `around` to define a pivoting point. 1840 """ 1841 if angle == 0: 1842 return self 1843 LT = LinearTransform().rotate_x(angle, rad, around) 1844 return self.apply_transform(LT)
Rotate around x-axis. If angle is in radians set `rad=True`.
Use `around` to define a pivoting point.
1846 def rotate_y(self, angle: float, rad=False, around=None) -> Self: 1847 """ 1848 Rotate around y-axis. If angle is in radians set `rad=True`. 1849 1850 Use `around` to define a pivoting point. 1851 """ 1852 if angle == 0: 1853 return self 1854 LT = LinearTransform().rotate_y(angle, rad, around) 1855 return self.apply_transform(LT)
Rotate around y-axis. If angle is in radians set `rad=True`.
Use `around` to define a pivoting point.
1857 def rotate_z(self, angle: float, rad=False, around=None) -> Self: 1858 """ 1859 Rotate around z-axis. If angle is in radians set `rad=True`. 1860 1861 Use `around` to define a pivoting point. 1862 """ 1863 if angle == 0: 1864 return self 1865 LT = LinearTransform().rotate_z(angle, rad, around) 1866 return self.apply_transform(LT)
Rotate around z-axis. If angle is in radians set `rad=True`.
Use `around` to define a pivoting point.
def reorient(self, initaxis, newaxis, rotation=0, rad=False, xyplane=False) -> Self:
    """
    Reorient the object to point to a new direction from an initial one.
    If `initaxis` is None, the object will be assumed in its "default" orientation.
    If `xyplane` is True, the object will be rotated to lie on the xy plane.

    Use `rotation` to first rotate the object around its `initaxis`.
    """
    # pivot around the object's current position
    pivot = self.transform.position
    reorientation = LinearTransform()
    reorientation.reorient(initaxis, newaxis, pivot, rotation, rad, xyplane)
    return self.apply_transform(reorientation)
Reorient the object to point to a new direction from an initial one.
If `initaxis` is None, the object will be assumed in its "default" orientation.
If `xyplane` is True, the object will be rotated to lie on the xy plane.
Use `rotation` to first rotate the object around its `initaxis`.
1881 def scale(self, s=None, reset=False, origin=True) -> Union[Self, np.array]: 1882 """ 1883 Set/get object's scaling factor. 1884 1885 Arguments: 1886 s : (list, float) 1887 scaling factor(s). 1888 reset : (bool) 1889 if True previous scaling factors are ignored. 1890 origin : (bool) 1891 if True scaling is applied with respect to object's position, 1892 otherwise is applied respect to (0,0,0). 1893 1894 Note: 1895 use `s=(sx,sy,sz)` to scale differently in the three coordinates. 1896 """ 1897 if s is None: 1898 return np.array(self.transform.T.GetScale()) 1899 1900 if not utils.is_sequence(s): 1901 s = [s, s, s] 1902 1903 LT = LinearTransform() 1904 if reset: 1905 old_s = np.array(self.transform.T.GetScale()) 1906 LT.scale(s / old_s) 1907 else: 1908 if origin is True: 1909 LT.scale(s, origin=self.transform.position) 1910 elif origin is False: 1911 LT.scale(s, origin=False) 1912 else: 1913 LT.scale(s, origin=origin) 1914 1915 return self.apply_transform(LT)
Set/get object's scaling factor.
Arguments:
- s : (list, float) scaling factor(s).
- reset : (bool) if True previous scaling factors are ignored.
- origin : (bool) if True scaling is applied with respect to object's position, otherwise is applied respect to (0,0,0).
Note:
use `s=(sx,sy,sz)` to scale differently in the three coordinates.
Inherited Members
- CommonAlgorithms
- pointdata
- celldata
- metadata
- memory_address
- memory_size
- modified
- box
- update_dataset
- bounds
- xbounds
- ybounds
- zbounds
- diagonal_size
- average_size
- center_of_mass
- copy_data_from
- inputdata
- npoints
- nvertices
- ncells
- points
- cell_centers
- lines
- lines_as_flat_array
- mark_boundaries
- find_cells_in_bounds
- find_cells_along_line
- find_cells_along_plane
- keep_cell_types
- map_cells_to_points
- vertices
- coordinates
- cells_as_flat_array
- cells
- map_points_to_cells
- resample_data_from
- interpolate_data_from
- add_ids
- gradient
- divergence
- vorticity
- probe
- compute_cell_size
- generate_random_data
- integrate_data
- write
- tomesh
- signed_distance
- unsigned_distance
- smooth_data
- compute_streamlines
class VolumeAlgorithms(CommonAlgorithms):
    """Methods for Volume objects."""

    def bounds(self) -> np.ndarray:
        """
        Get the object bounds.
        Returns a list in format `[xmin,xmax, ymin,ymax, zmin,zmax]`.
        """
        # OVERRIDE CommonAlgorithms.bounds() which is too slow
        return np.array(self.dataset.GetBounds())

    def isosurface(self, value=None, flying_edges=False) -> "vedo.mesh.Mesh":
        """
        Return a `Mesh` isosurface extracted from the `Volume` object.

        Set `value` as single float or list of values to draw the isosurface(s).
        Use flying_edges for faster results (but sometimes can interfere with `smooth()`).

        Arguments:
            value : (float, list)
                single value or list of values to draw the isosurface(s).
            flying_edges : (bool)
                use the Flying Edges algorithm instead of the standard contour filter.

        Examples:
            - [isosurfaces1.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/isosurfaces1.py)

                ![](https://vedo.embl.es/images/volumetric/isosurfaces.png)
        """
        scrange = self.dataset.GetScalarRange()

        if flying_edges:
            cf = vtki.new("FlyingEdges3D")
            cf.InterpolateAttributesOn()
        else:
            cf = vtki.new("ContourFilter")
            cf.UseScalarTreeOn()

        cf.SetInputData(self.dataset)
        cf.ComputeNormalsOn()

        if utils.is_sequence(value):
            cf.SetNumberOfContours(len(value))
            for i, t in enumerate(value):
                cf.SetValue(i, t)
        else:
            if value is None:
                # heuristic default: one third into the scalar range
                value = (2 * scrange[0] + scrange[1]) / 3.0
                # print("automatic isosurface value =", value)
            cf.SetValue(0, value)

        cf.Update()
        poly = cf.GetOutput()

        out = vedo.mesh.Mesh(poly, c=None).phong()
        out.mapper.SetScalarRange(scrange[0], scrange[1])

        out.pipeline = utils.OperationNode(
            "isosurface",
            parents=[self],
            comment=f"#pts {out.dataset.GetNumberOfPoints()}",
            c="#4cc9f0:#e9c46a",
        )
        return out

    def isosurface_discrete(self, value=None, nsmooth=15) -> "vedo.mesh.Mesh":
        """
        Create boundary/isocontour surfaces from a label map (e.g., a segmented image) using a threaded,
        3D version of the multiple objects/labels Surface Nets algorithm.
        The input is a 3D image (i.e., volume) where each voxel is labeled
        (integer labels are preferred to real values), and the output data is a polygonal mesh separating
        labeled regions / objects.
        (Note that on output each region [corresponding to a different segmented object] will share
        points/edges on a common boundary, i.e., two neighboring objects will share the boundary that separates them).

        Arguments:
            value : (float, list)
                single value or list of values to draw the isosurface(s).
            nsmooth : (int)
                number of iterations of smoothing (0 means no smoothing).

        Examples:
            - [isosurfaces2.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/isosurfaces2.py)
        """
        if not utils.is_sequence(value):
            value = [value]

        snets = vtki.new("SurfaceNets3D")
        snets.SetInputData(self.dataset)

        if nsmooth:
            snets.SmoothingOn()
            snets.AutomaticSmoothingConstraintsOn()
            snets.GetSmoother().SetNumberOfIterations(nsmooth)
            # snets.GetSmoother().SetRelaxationFactor(relaxation_factor)
            # snets.GetSmoother().SetConstraintDistance(constraint_distance)
        else:
            snets.SmoothingOff()

        for i, val in enumerate(value):
            snets.SetValue(i, val)
        # Configure the output BEFORE executing the pipeline: the previous
        # version called Update() both before and after these setters,
        # running the (expensive) SurfaceNets filter twice.
        snets.SetOutputMeshTypeToTriangles()
        snets.SetOutputStyleToBoundary()
        snets.Update()

        out = vedo.mesh.Mesh(snets.GetOutput())
        out.pipeline = utils.OperationNode(
            "isosurface_discrete",
            parents=[self],
            comment=f"#pts {out.dataset.GetNumberOfPoints()}",
            c="#4cc9f0:#e9c46a",
        )
        return out

    def legosurface(
        self,
        vmin=None,
        vmax=None,
        invert=False,
        boundary=False,
        array_name="input_scalars",
    ) -> "vedo.mesh.Mesh":
        """
        Represent an object - typically a `Volume` - as lego blocks (voxels).
        By default colors correspond to the volume's scalar.
        Returns a `Mesh` object.

        Arguments:
            vmin : (float)
                the lower threshold, voxels below this value are not shown.
            vmax : (float)
                the upper threshold, voxels above this value are not shown.
            invert : (bool)
                invert the voxel selection.
            boundary : (bool)
                controls whether to include cells that are partially inside
            array_name : (int, str)
                name or index of the scalar array to be considered

        Examples:
            - [legosurface.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/legosurface.py)

                ![](https://vedo.embl.es/images/volumetric/56820682-da40e500-684c-11e9-8ea3-91cbcba24b3a.png)
        """
        imp_dataset = vtki.new("ImplicitDataSet")
        imp_dataset.SetDataSet(self.dataset)
        window = vtki.new("ImplicitWindowFunction")
        window.SetImplicitFunction(imp_dataset)

        srng = list(self.dataset.GetScalarRange())
        if vmin is not None:
            srng[0] = vmin
        if vmax is not None:
            srng[1] = vmax
        # widen the window slightly so that values exactly at the
        # thresholds are not dropped by floating-point comparisons
        tol = 0.00001 * (srng[1] - srng[0])
        srng[0] -= tol
        srng[1] += tol
        window.SetWindowRange(srng)

        extract = vtki.new("ExtractGeometry")
        extract.SetInputData(self.dataset)
        extract.SetImplicitFunction(window)
        extract.SetExtractInside(invert)
        extract.SetExtractBoundaryCells(boundary)
        extract.Update()

        gf = vtki.new("GeometryFilter")
        gf.SetInputData(extract.GetOutput())
        gf.Update()

        m = vedo.mesh.Mesh(gf.GetOutput()).lw(0.1).flat()
        m.map_points_to_cells()
        m.celldata.select(array_name)

        m.pipeline = utils.OperationNode(
            "legosurface",
            parents=[self],
            comment=f"array: {array_name}",
            c="#4cc9f0:#e9c46a",
        )
        return m

    def tomesh(self, fill=True, shrink=1.0) -> "vedo.mesh.Mesh":
        """
        Build a polygonal Mesh from the current object.

        If `fill=True`, the interior faces of all the cells are created.
        (setting a `shrink` value slightly smaller than the default 1.0
        can avoid flickering due to internal adjacent faces).

        If `fill=False`, only the boundary faces will be generated.
        """
        gf = vtki.new("GeometryFilter")
        if fill:
            sf = vtki.new("ShrinkFilter")
            sf.SetInputData(self.dataset)
            sf.SetShrinkFactor(shrink)
            sf.Update()
            gf.SetInputData(sf.GetOutput())
            gf.Update()
            poly = gf.GetOutput()
            if shrink == 1.0:
                # unshrunk cells produce duplicated points on shared
                # faces: merge them so the mesh is watertight
                clean_poly = vtki.new("CleanPolyData")
                clean_poly.PointMergingOn()
                clean_poly.ConvertLinesToPointsOn()
                clean_poly.ConvertPolysToLinesOn()
                clean_poly.ConvertStripsToPolysOn()
                clean_poly.SetInputData(poly)
                clean_poly.Update()
                poly = clean_poly.GetOutput()
        else:
            gf.SetInputData(self.dataset)
            gf.Update()
            poly = gf.GetOutput()

        msh = vedo.mesh.Mesh(poly).flat()
        msh.scalarbar = self.scalarbar
        lut = utils.ctf2lut(self)
        if lut:
            msh.mapper.SetLookupTable(lut)

        msh.pipeline = utils.OperationNode(
            "tomesh", parents=[self], comment=f"fill={fill}", c="#9e2a2b:#e9c46a"
        )
        return msh
Methods for Volume objects.
1922 def bounds(self) -> np.ndarray: 1923 """ 1924 Get the object bounds. 1925 Returns a list in format `[xmin,xmax, ymin,ymax, zmin,zmax]`. 1926 """ 1927 # OVERRIDE CommonAlgorithms.bounds() which is too slow 1928 return np.array(self.dataset.GetBounds())
Get the object bounds.
Returns a list in format `[xmin,xmax, ymin,ymax, zmin,zmax]`.
1930 def isosurface(self, value=None, flying_edges=False) -> "vedo.mesh.Mesh": 1931 """ 1932 Return an `Mesh` isosurface extracted from the `Volume` object. 1933 1934 Set `value` as single float or list of values to draw the isosurface(s). 1935 Use flying_edges for faster results (but sometimes can interfere with `smooth()`). 1936 1937 Examples: 1938 - [isosurfaces1.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/isosurfaces1.py) 1939 1940 ![](https://vedo.embl.es/images/volumetric/isosurfaces.png) 1941 """ 1942 scrange = self.dataset.GetScalarRange() 1943 1944 if flying_edges: 1945 cf = vtki.new("FlyingEdges3D") 1946 cf.InterpolateAttributesOn() 1947 else: 1948 cf = vtki.new("ContourFilter") 1949 cf.UseScalarTreeOn() 1950 1951 cf.SetInputData(self.dataset) 1952 cf.ComputeNormalsOn() 1953 1954 if utils.is_sequence(value): 1955 cf.SetNumberOfContours(len(value)) 1956 for i, t in enumerate(value): 1957 cf.SetValue(i, t) 1958 else: 1959 if value is None: 1960 value = (2 * scrange[0] + scrange[1]) / 3.0 1961 # print("automatic isosurface value =", value) 1962 cf.SetValue(0, value) 1963 1964 cf.Update() 1965 poly = cf.GetOutput() 1966 1967 out = vedo.mesh.Mesh(poly, c=None).phong() 1968 out.mapper.SetScalarRange(scrange[0], scrange[1]) 1969 1970 out.pipeline = utils.OperationNode( 1971 "isosurface", 1972 parents=[self], 1973 comment=f"#pts {out.dataset.GetNumberOfPoints()}", 1974 c="#4cc9f0:#e9c46a", 1975 ) 1976 return out
Return a `Mesh` isosurface extracted from the `Volume` object.
Set `value` as single float or list of values to draw the isosurface(s).
Use `flying_edges` for faster results (but it can sometimes interfere with `smooth()`).
Examples:
1978 def isosurface_discrete(self, value=None, nsmooth=15) -> "vedo.mesh.Mesh": 1979 """ 1980 Create boundary/isocontour surfaces from a label map (e.g., a segmented image) using a threaded, 1981 3D version of the multiple objects/labels Surface Nets algorithm. 1982 The input is a 3D image (i.e., volume) where each voxel is labeled 1983 (integer labels are preferred to real values), and the output data is a polygonal mesh separating 1984 labeled regions / objects. 1985 (Note that on output each region [corresponding to a different segmented object] will share 1986 points/edges on a common boundary, i.e., two neighboring objects will share the boundary that separates them). 1987 1988 Arguments: 1989 value : (float, list) 1990 single value or list of values to draw the isosurface(s). 1991 nsmooth : (int) 1992 number of iterations of smoothing (0 means no smoothing). 1993 1994 Examples: 1995 - [isosurfaces2.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/isosurfaces2.py) 1996 """ 1997 if not utils.is_sequence(value): 1998 value = [value] 1999 2000 snets = vtki.new("SurfaceNets3D") 2001 snets.SetInputData(self.dataset) 2002 2003 if nsmooth: 2004 snets.SmoothingOn() 2005 snets.AutomaticSmoothingConstraintsOn() 2006 snets.GetSmoother().SetNumberOfIterations(nsmooth) 2007 # snets.GetSmoother().SetRelaxationFactor(relaxation_factor) 2008 # snets.GetSmoother().SetConstraintDistance(constraint_distance) 2009 else: 2010 snets.SmoothingOff() 2011 2012 for i, val in enumerate(value): 2013 snets.SetValue(i, val) 2014 snets.Update() 2015 snets.SetOutputMeshTypeToTriangles() 2016 snets.SetOutputStyleToBoundary() 2017 snets.Update() 2018 2019 out = vedo.mesh.Mesh(snets.GetOutput()) 2020 out.pipeline = utils.OperationNode( 2021 "isosurface_discrete", 2022 parents=[self], 2023 comment=f"#pts {out.dataset.GetNumberOfPoints()}", 2024 c="#4cc9f0:#e9c46a", 2025 ) 2026 return out
Create boundary/isocontour surfaces from a label map (e.g., a segmented image) using a threaded, 3D version of the multiple objects/labels Surface Nets algorithm. The input is a 3D image (i.e., volume) where each voxel is labeled (integer labels are preferred to real values), and the output data is a polygonal mesh separating labeled regions / objects. (Note that on output each region [corresponding to a different segmented object] will share points/edges on a common boundary, i.e., two neighboring objects will share the boundary that separates them).
Arguments:
- value : (float, list) single value or list of values to draw the isosurface(s).
- nsmooth : (int) number of iterations of smoothing (0 means no smoothing).
Examples:
2029 def legosurface( 2030 self, 2031 vmin=None, 2032 vmax=None, 2033 invert=False, 2034 boundary=False, 2035 array_name="input_scalars", 2036 ) -> "vedo.mesh.Mesh": 2037 """ 2038 Represent an object - typically a `Volume` - as lego blocks (voxels). 2039 By default colors correspond to the volume's scalar. 2040 Returns an `Mesh` object. 2041 2042 Arguments: 2043 vmin : (float) 2044 the lower threshold, voxels below this value are not shown. 2045 vmax : (float) 2046 the upper threshold, voxels above this value are not shown. 2047 boundary : (bool) 2048 controls whether to include cells that are partially inside 2049 array_name : (int, str) 2050 name or index of the scalar array to be considered 2051 2052 Examples: 2053 - [legosurface.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/legosurface.py) 2054 2055 ![](https://vedo.embl.es/images/volumetric/56820682-da40e500-684c-11e9-8ea3-91cbcba24b3a.png) 2056 """ 2057 imp_dataset = vtki.new("ImplicitDataSet") 2058 imp_dataset.SetDataSet(self.dataset) 2059 window = vtki.new("ImplicitWindowFunction") 2060 window.SetImplicitFunction(imp_dataset) 2061 2062 srng = list(self.dataset.GetScalarRange()) 2063 if vmin is not None: 2064 srng[0] = vmin 2065 if vmax is not None: 2066 srng[1] = vmax 2067 tol = 0.00001 * (srng[1] - srng[0]) 2068 srng[0] -= tol 2069 srng[1] += tol 2070 window.SetWindowRange(srng) 2071 2072 extract = vtki.new("ExtractGeometry") 2073 extract.SetInputData(self.dataset) 2074 extract.SetImplicitFunction(window) 2075 extract.SetExtractInside(invert) 2076 extract.SetExtractBoundaryCells(boundary) 2077 extract.Update() 2078 2079 gf = vtki.new("GeometryFilter") 2080 gf.SetInputData(extract.GetOutput()) 2081 gf.Update() 2082 2083 m = vedo.mesh.Mesh(gf.GetOutput()).lw(0.1).flat() 2084 m.map_points_to_cells() 2085 m.celldata.select(array_name) 2086 2087 m.pipeline = utils.OperationNode( 2088 "legosurface", 2089 parents=[self], 2090 comment=f"array: {array_name}", 2091 c="#4cc9f0:#e9c46a", 2092 ) 
2093 return m
Represent an object - typically a `Volume` - as lego blocks (voxels).
By default colors correspond to the volume's scalar.
Returns a `Mesh` object.
Arguments:
- vmin : (float) the lower threshold, voxels below this value are not shown.
- vmax : (float) the upper threshold, voxels above this value are not shown.
- boundary : (bool) controls whether to include cells that are partially inside
- array_name : (int, str) name or index of the scalar array to be considered
Examples:
2095 def tomesh(self, fill=True, shrink=1.0) -> "vedo.mesh.Mesh": 2096 """ 2097 Build a polygonal Mesh from the current object. 2098 2099 If `fill=True`, the interior faces of all the cells are created. 2100 (setting a `shrink` value slightly smaller than the default 1.0 2101 can avoid flickering due to internal adjacent faces). 2102 2103 If `fill=False`, only the boundary faces will be generated. 2104 """ 2105 gf = vtki.new("GeometryFilter") 2106 if fill: 2107 sf = vtki.new("ShrinkFilter") 2108 sf.SetInputData(self.dataset) 2109 sf.SetShrinkFactor(shrink) 2110 sf.Update() 2111 gf.SetInputData(sf.GetOutput()) 2112 gf.Update() 2113 poly = gf.GetOutput() 2114 if shrink == 1.0: 2115 clean_poly = vtki.new("CleanPolyData") 2116 clean_poly.PointMergingOn() 2117 clean_poly.ConvertLinesToPointsOn() 2118 clean_poly.ConvertPolysToLinesOn() 2119 clean_poly.ConvertStripsToPolysOn() 2120 clean_poly.SetInputData(poly) 2121 clean_poly.Update() 2122 poly = clean_poly.GetOutput() 2123 else: 2124 gf.SetInputData(self.dataset) 2125 gf.Update() 2126 poly = gf.GetOutput() 2127 2128 msh = vedo.mesh.Mesh(poly).flat() 2129 msh.scalarbar = self.scalarbar 2130 lut = utils.ctf2lut(self) 2131 if lut: 2132 msh.mapper.SetLookupTable(lut) 2133 2134 msh.pipeline = utils.OperationNode( 2135 "tomesh", parents=[self], comment=f"fill={fill}", c="#9e2a2b:#e9c46a" 2136 ) 2137 return msh
Build a polygonal Mesh from the current object.
If `fill=True`, the interior faces of all the cells are created.
(setting a `shrink` value slightly smaller than the default 1.0
can avoid flickering due to internal adjacent faces).
If `fill=False`, only the boundary faces will be generated.
Inherited Members
- CommonAlgorithms
- pointdata
- celldata
- metadata
- memory_address
- memory_size
- modified
- box
- update_dataset
- xbounds
- ybounds
- zbounds
- diagonal_size
- average_size
- center_of_mass
- copy_data_from
- inputdata
- npoints
- nvertices
- ncells
- points
- cell_centers
- lines
- lines_as_flat_array
- mark_boundaries
- find_cells_in_bounds
- find_cells_along_line
- find_cells_along_plane
- keep_cell_types
- map_cells_to_points
- vertices
- coordinates
- cells_as_flat_array
- cells
- map_points_to_cells
- resample_data_from
- interpolate_data_from
- add_ids
- gradient
- divergence
- vorticity
- probe
- compute_cell_size
- generate_random_data
- integrate_data
- write
- signed_distance
- unsigned_distance
- smooth_data
- compute_streamlines