vedo.core
Base classes providing functionality to different vedo objects.
1#!/usr/bin/env python3 2# -*- coding: utf-8 -*- 3import numpy as np 4from typing import List, Union, Any 5from typing_extensions import Self 6 7import vedo.vtkclasses as vtki 8 9import vedo 10from vedo import colors 11from vedo import utils 12from vedo.transformations import LinearTransform, NonLinearTransform 13 14 15__docformat__ = "google" 16 17__doc__ = """ 18Base classes providing functionality to different vedo objects. 19 20![](https://vedo.embl.es/images/feats/algorithms_illustration.png) 21""" 22 23__all__ = [ 24 "DataArrayHelper", 25 "CommonAlgorithms", 26 "PointAlgorithms", 27 "VolumeAlgorithms", 28] 29 30warnings = dict( 31 points_getter=( 32 "WARNING: points() is deprecated, use vertices instead. Change:\n" 33 " mesh.points() -> mesh.vertices\n" 34 " (silence this with vedo.core.warnings['points_getter']=False)" 35 ), 36 points_setter=( 37 "WARNING: points() is deprecated, use vertices instead. Change:\n" 38 " mesh.points([[x,y,z], ...]) -> mesh.vertices = [[x,y,z], ...]\n" 39 " (silence this with vedo.core.warnings['points_getter']=False)" 40 ), 41) 42 43############################################################################### 44class DataArrayHelper: 45 # Internal use only. 46 # Helper class to manage data associated to either 47 # points (or vertices) and cells (or faces). 
    def __init__(self, obj, association):
        # obj: the vedo object whose `dataset` attribute is wrapped.
        # association selects the data bucket:
        #   0 -> point data, 1 -> cell data, 2 -> field (meta) data.

        self.obj = obj
        self.association = association

    def __getitem__(self, key):
        """Return array `key` (name or index) as numpy; string arrays return a list of str."""

        if self.association == 0:
            data = self.obj.dataset.GetPointData()

        elif self.association == 1:
            data = self.obj.dataset.GetCellData()

        elif self.association == 2:
            data = self.obj.dataset.GetFieldData()

            # Field data may hold string arrays, which have no numpy equivalent:
            # return them as a plain python list instead.
            varr = data.GetAbstractArray(key)
            if isinstance(varr, vtki.vtkStringArray):
                if isinstance(key, int):
                    key = data.GetArrayName(key)
                n = varr.GetNumberOfValues()
                narr = [varr.GetValue(i) for i in range(n)]
                return narr
                ###########

        else:
            raise RuntimeError()

        if isinstance(key, int):
            key = data.GetArrayName(key)

        arr = data.GetArray(key)
        if not arr:
            return None
        return utils.vtk2numpy(arr)

    def __setitem__(self, key, input_array):
        """Create/overwrite array `key`; also updates the active scalars/vectors."""

        if self.association == 0:
            data = self.obj.dataset.GetPointData()
            n = self.obj.dataset.GetNumberOfPoints()
            self.obj.mapper.SetScalarModeToUsePointData()

        elif self.association == 1:
            data = self.obj.dataset.GetCellData()
            n = self.obj.dataset.GetNumberOfCells()
            self.obj.mapper.SetScalarModeToUseCellData()

        elif self.association == 2:
            # Field (meta) data: length is unconstrained, strings are allowed.
            data = self.obj.dataset.GetFieldData()
            if not utils.is_sequence(input_array):
                input_array = [input_array]

            if isinstance(input_array[0], str):
                varr = vtki.vtkStringArray()
                varr.SetName(key)
                varr.SetNumberOfComponents(1)
                varr.SetNumberOfTuples(len(input_array))
                for i, iarr in enumerate(input_array):
                    if isinstance(iarr, np.ndarray):
                        iarr = iarr.tolist()  # better format
                    # Note: a string k can be converted to numpy with
                    # import json; k = np.array(json.loads(k))
                    varr.InsertValue(i, str(iarr))
            else:
                try:
                    varr = utils.numpy2vtk(input_array, name=key)
                except TypeError as e:
                    vedo.logger.error(
                        f"cannot create metadata with input object:\n"
                        f"{input_array}"
                        f"\n\nAllowed content examples are:\n"
                        f"- flat list of strings ['a','b', 1, [1,2,3], ...]"
                        f" (first item must be a string in this case)\n"
                        f" hint: use k = np.array(json.loads(k)) to convert strings\n"
                        f"- numpy arrays of any shape"
                    )
                    raise e

            data.AddArray(varr)
            return  ############

        else:
            raise RuntimeError()

        # Point/cell data must match the number of elements exactly.
        if len(input_array) != n:
            vedo.logger.error(
                f"Error in point/cell data: length of input {len(input_array)}"
                f" != {n} nr. of elements"
            )
            raise RuntimeError()

        input_array = np.asarray(input_array)
        varr = utils.numpy2vtk(input_array, name=key)
        data.AddArray(varr)

        if len(input_array.shape) == 1:  # scalars
            data.SetActiveScalars(key)
            try:  # could be a volume mapper
                self.obj.mapper.SetScalarRange(data.GetScalars().GetRange())
            except AttributeError:
                pass
        elif len(input_array.shape) == 2 and input_array.shape[1] == 3:  # vectors
            if key.lower() == "normals":
                data.SetActiveNormals(key)
            else:
                data.SetActiveVectors(key)

    def keys(self) -> List[str]:
        """Return the list of available data array names"""
        if self.association == 0:
            data = self.obj.dataset.GetPointData()
        elif self.association == 1:
            data = self.obj.dataset.GetCellData()
        elif self.association == 2:
            data = self.obj.dataset.GetFieldData()
        arrnames = []
        for i in range(data.GetNumberOfArrays()):
            name = ""
            if self.association == 2:
                name = data.GetAbstractArray(i).GetName()
            else:
                iarr = data.GetArray(i)
                if iarr:
                    name = iarr.GetName()
            if name:
                arrnames.append(name)
        return arrnames

    def items(self) -> List:
        """Return the list of available data array `(names, values)`."""
        if self.association == 0:
            data = self.obj.dataset.GetPointData()
        elif self.association == 1:
            data = self.obj.dataset.GetCellData()
        elif self.association == 2:
            data = self.obj.dataset.GetFieldData()
        arrnames = []
        for i in range(data.GetNumberOfArrays()):
            if self.association == 2:
                name = data.GetAbstractArray(i).GetName()
            else:
                name = data.GetArray(i).GetName()
            if name:
                # self[name] re-dispatches through __getitem__ to convert to numpy
                arrnames.append((name, self[name]))
        return arrnames

    def todict(self) -> dict:
        """Return a dictionary of the available data arrays."""
        return dict(self.items())

    def rename(self, oldname: str, newname: str) -> None:
        """Rename an array"""
        if self.association == 0:
            varr = self.obj.dataset.GetPointData().GetArray(oldname)
        elif self.association == 1:
            varr = self.obj.dataset.GetCellData().GetArray(oldname)
        elif self.association == 2:
            varr = self.obj.dataset.GetFieldData().GetAbstractArray(oldname)
        if varr:
            varr.SetName(newname)
        else:
            vedo.logger.warning(
                f"Cannot rename non existing array {oldname} to {newname}"
            )

    def remove(self, key: Union[int, str]) -> None:
        """Remove a data array by name or number"""
        if self.association == 0:
            self.obj.dataset.GetPointData().RemoveArray(key)
        elif self.association == 1:
            self.obj.dataset.GetCellData().RemoveArray(key)
        elif self.association == 2:
            self.obj.dataset.GetFieldData().RemoveArray(key)

    def clear(self) -> None:
        """Remove all data associated to this object"""
        if self.association == 0:
            data = self.obj.dataset.GetPointData()
        elif self.association == 1:
            data = self.obj.dataset.GetCellData()
        elif self.association == 2:
            data = self.obj.dataset.GetFieldData()
        # NOTE(review): arrays are removed while iterating over the original
        # count; indices shift after each RemoveArray, so some arrays may be
        # skipped when several are present — confirm intended behavior.
        for i in range(data.GetNumberOfArrays()):
            if self.association == 2:
                name = data.GetAbstractArray(i).GetName()
            else:
                name = data.GetArray(i).GetName()
            data.RemoveArray(name)

    def select(self, key: Union[int, str]) -> Any:
        """Select one specific array by its name to make it the `active` one."""
        # Default (ColorModeToDefault): unsigned char scalars are treated as colors,
        # and NOT mapped through the lookup table, while everything else is.
        # ColorModeToDirectScalar extends ColorModeToDefault such that all integer
        # types are treated as colors with values in the range 0-255
        # and floating types are treated as colors with values in the range 0.0-1.0.
        # Setting ColorModeToMapScalars means that all scalar data will be mapped
        # through the lookup table.
        # (Note that for multi-component scalars, the particular component
        # to use for mapping can be specified using the SelectColorArray() method.)
        if self.association == 0:
            data = self.obj.dataset.GetPointData()
            self.obj.mapper.SetScalarModeToUsePointData()
        else:
            data = self.obj.dataset.GetCellData()
            self.obj.mapper.SetScalarModeToUseCellData()

        if isinstance(key, int):
            key = data.GetArrayName(key)

        arr = data.GetArray(key)
        if not arr:
            return self.obj

        # Dispatch on the number of components:
        # 1 -> scalars, 2 -> texture coords, 3/4 -> vectors or RGB(A) colors,
        # 9 -> tensors.
        nc = arr.GetNumberOfComponents()
        # print("GetNumberOfComponents", nc)
        if nc == 1:
            data.SetActiveScalars(key)
        elif nc == 2:
            data.SetTCoords(arr)
        elif nc in (3, 4):
            if "rgb" in key.lower():  # type: ignore
                data.SetActiveScalars(key)
                try:
                    # could be a volume mapper
                    self.obj.mapper.SetColorModeToDirectScalars()
                    data.SetActiveVectors(None)  # need this to fix bug in #1066
                    # print("SetColorModeToDirectScalars for", key)
                except AttributeError:
                    pass
            else:
                data.SetActiveVectors(key)
        elif nc == 9:
            data.SetActiveTensors(key)
        else:
            vedo.logger.error(f"Cannot select array {key} with {nc} components")
            return self.obj

        try:
            # could be a volume mapper
            self.obj.mapper.SetArrayName(key)
            self.obj.mapper.ScalarVisibilityOn()
        except AttributeError:
            pass

        return self.obj

    def select_texture_coords(self, key: Union[int, str]) -> Any:
        """Select one specific array to be used as texture coordinates."""
        if self.association == 0:
            data = self.obj.dataset.GetPointData()
        else:
            vedo.logger.warning("texture coordinates are only available for point data")
            # NOTE(review): returns None here while the success path returns
            # self.obj — chained calls on the result will fail on this branch.
            return

        if isinstance(key, int):
            key = data.GetArrayName(key)
        data.SetTCoords(data.GetArray(key))
        return self.obj

    def select_normals(self, key: Union[int, str]) -> Any:
        """Select one specific normal array by its name to make it the "active" one."""
        if self.association == 0:
            data = self.obj.dataset.GetPointData()
            self.obj.mapper.SetScalarModeToUsePointData()
        else:
            data = self.obj.dataset.GetCellData()
            self.obj.mapper.SetScalarModeToUseCellData()

        if isinstance(key, int):
            key = data.GetArrayName(key)
        data.SetActiveNormals(key)
        return self.obj

    def print(self, **kwargs) -> None:
        """Print the array names available to terminal"""
        colors.printc(self.keys(), **kwargs)

    def __repr__(self) -> str:
        """Representation"""

        # Build an ANSI-formatted summary for one data bucket.
        def _get_str(pd, header):
            out = f"\x1b[2m\x1b[1m\x1b[7m{header}"
            if pd.GetNumberOfArrays():
                if self.obj.name:
                    out += f" in {self.obj.name}"
                out += f" contains {pd.GetNumberOfArrays()} array(s)\x1b[0m"
                for i in range(pd.GetNumberOfArrays()):
                    varr = pd.GetArray(i)
                    out += f"\n\x1b[1m\x1b[4mArray name : {varr.GetName()}\x1b[0m"
                    out += "\nindex".ljust(15) + f": {i}"
                    t = varr.GetDataType()
                    if t in vtki.array_types:
                        out += "\ntype".ljust(15)
                        out += f": {vtki.array_types[t]}"
                    shape = (varr.GetNumberOfTuples(), varr.GetNumberOfComponents())
                    out += "\nshape".ljust(15) + f": {shape}"
                    out += "\nrange".ljust(15) + f": {np.array(varr.GetRange())}"
                    out += "\nmax id".ljust(15) + f": {varr.GetMaxId()}"
                    out += "\nlook up table".ljust(15) + f": {bool(varr.GetLookupTable())}"
                    out += "\nin-memory size".ljust(15) + f": {varr.GetActualMemorySize()} KB"
            else:
                out += " is empty.\x1b[0m"
            return out

        if self.association == 0:
            out = _get_str(self.obj.dataset.GetPointData(), "Point Data")
        elif self.association == 1:
            out = _get_str(self.obj.dataset.GetCellData(), "Cell Data")
        elif self.association == 2:
            pd = self.obj.dataset.GetFieldData()
            # NOTE(review): if the field data is empty, `out` is never assigned
            # on this branch and the final return raises UnboundLocalError —
            # confirm whether empty field data can reach __repr__.
            if pd.GetNumberOfArrays():
                out = "\x1b[2m\x1b[1m\x1b[7mMeta Data"
                if self.obj.name:
                    out += f" in {self.obj.name}"
                out += f" contains {pd.GetNumberOfArrays()} entries\x1b[0m"
                for i in range(pd.GetNumberOfArrays()):
                    varr = pd.GetAbstractArray(i)
                    out += f"\n\x1b[1m\x1b[4mEntry name : {varr.GetName()}\x1b[0m"
                    out += "\nindex".ljust(15) + f": {i}"
                    shape = (varr.GetNumberOfTuples(), varr.GetNumberOfComponents())
                    out += "\nshape".ljust(15) + f": {shape}"

        return out
###############################################################################
class CommonAlgorithms:
    """Common algorithms."""

    @property
    def pointdata(self):
        """
        Create and/or return a `numpy.array` associated to points (vertices).
        A data array can be indexed either as a string or by an integer number.
        E.g.: `myobj.pointdata["arrayname"]`

        Usage:

            `myobj.pointdata.keys()` to return the available data array names

            `myobj.pointdata.select(name)` to make this array the active one

            `myobj.pointdata.remove(name)` to remove this array
        """
        return DataArrayHelper(self, 0)

    @property
    def celldata(self):
        """
        Create and/or return a `numpy.array` associated to cells (faces).
        A data array can be indexed either as a string or by an integer number.
        E.g.: `myobj.celldata["arrayname"]`

        Usage:

            `myobj.celldata.keys()` to return the available data array names

            `myobj.celldata.select(name)` to make this array the active one

            `myobj.celldata.remove(name)` to remove this array
        """
        return DataArrayHelper(self, 1)

    @property
    def metadata(self):
        """
        Create and/or return a `numpy.array` associated to neither cells nor faces.
        A data array can be indexed either as a string or by an integer number.
        E.g.: `myobj.metadata["arrayname"]`

        Usage:

            `myobj.metadata.keys()` to return the available data array names

            `myobj.metadata.select(name)` to make this array the active one

            `myobj.metadata.remove(name)` to remove this array
        """
        return DataArrayHelper(self, 2)

    def memory_address(self) -> int:
        """
        Return a unique memory address integer which may serve as the ID of the
        object, or passed to c++ code.
        """
        # https://www.linkedin.com/pulse/speedup-your-code-accessing-python-vtk-objects-from-c-pletzer/
        # https://github.com/tfmoraes/polydata_connectivity
        return int(self.dataset.GetAddressAsString("")[5:], 16)

    def memory_size(self) -> int:
        """Return the size in bytes of the object in memory."""
        return self.dataset.GetActualMemorySize()

    def modified(self) -> Self:
        """Use in conjunction with `tonumpy()` to update any modifications to the image array."""
        self.dataset.GetPointData().Modified()
        scals = self.dataset.GetPointData().GetScalars()
        if scals:
            scals.Modified()
        return self

    def box(self, scale=1, padding=0) -> "vedo.Mesh":
        """
        Return the bounding box as a new `Mesh` object.

        Arguments:
            scale : (float)
                box size can be scaled by a factor
            padding : (float, list)
                a constant padding can be added (can be a list `[padx,pady,padz]`)
        """
        b = self.bounds()
        if not utils.is_sequence(padding):
            padding = [padding, padding, padding]
        length, width, height = b[1] - b[0], b[3] - b[2], b[5] - b[4]
        tol = (length + width + height) / 30000  # useful for boxing text
        pos = [(b[0] + b[1]) / 2, (b[3] + b[2]) / 2, (b[5] + b[4]) / 2 - tol]
        bx = vedo.shapes.Box(
            pos,
            length * scale + padding[0],
            width * scale + padding[1],
            height * scale + padding[2],
            c="gray",
        )
        try:
            # inherit the visual properties of the boxed object, if any
            pr = vtki.vtkProperty()
            pr.DeepCopy(self.properties)
            bx.actor.SetProperty(pr)
            bx.properties = pr
        except (AttributeError, TypeError):
            pass
        bx.flat().lighting("off").wireframe(True)
        return bx

    def update_dataset(self, dataset, **kwargs) -> Self:
        """Update the dataset of the object with the provided VTK dataset."""
        self._update(dataset, **kwargs)
        return self

    def bounds(self) -> np.ndarray:
        """
        Get the object bounds.
        Returns a list in format `[xmin,xmax, ymin,ymax, zmin,zmax]`.
        """
        try:  # this is very slow for large meshes
            pts = self.vertices
            xmin, ymin, zmin = np.min(pts, axis=0)
            xmax, ymax, zmax = np.max(pts, axis=0)
            return np.array([xmin, xmax, ymin, ymax, zmin, zmax])
        except (AttributeError, ValueError):
            # fall back to the VTK-cached bounds
            return np.array(self.dataset.GetBounds())

    def xbounds(self, i=None) -> np.ndarray:
        """Get the bounds `[xmin,xmax]`. Can specify upper or lower with i (0,1)."""
        b = self.bounds()
        if i is not None:
            return b[i]
        return np.array([b[0], b[1]])

    def ybounds(self, i=None) -> np.ndarray:
        """Get the bounds `[ymin,ymax]`. Can specify upper or lower with i (0,1)."""
        b = self.bounds()
        if i == 0:
            return b[2]
        if i == 1:
            return b[3]
        return np.array([b[2], b[3]])

    def zbounds(self, i=None) -> np.ndarray:
        """Get the bounds `[zmin,zmax]`. Can specify upper or lower with i (0,1)."""
        b = self.bounds()
        if i == 0:
            return b[4]
        if i == 1:
            return b[5]
        return np.array([b[4], b[5]])

    def diagonal_size(self) -> float:
        """Get the length of the diagonal of the bounding box."""
        b = self.bounds()
        return np.sqrt((b[1] - b[0])**2 + (b[3] - b[2])**2 + (b[5] - b[4])**2)

    def average_size(self) -> float:
        """
        Calculate and return the average size of the object.
        This is the mean of the vertex distances from the center of mass.
        Returns 0.0 for an object with no vertices.
        """
        coords = self.vertices
        # Guard the empty case BEFORE taking the mean: np.mean on an empty
        # array emits a RuntimeWarning and produces nan.
        if coords.shape[0] == 0:
            return 0.0
        cm = np.mean(coords, axis=0)
        cc = coords - cm
        return np.mean(np.linalg.norm(cc, axis=1))

    def center_of_mass(self) -> np.ndarray:
        """Get the center of mass of the object."""
        if isinstance(self, (vedo.RectilinearGrid, vedo.Volume)):
            # structured datasets: use the geometric center directly
            return np.array(self.dataset.GetCenter())
        cmf = vtki.new("CenterOfMass")
        cmf.SetInputData(self.dataset)
        cmf.Update()
        c = cmf.GetCenter()
        return np.array(c)

    def copy_data_from(self, obj: Any) -> Self:
        """Copy all data (point and cell data) from the input object into this one."""
        self.dataset.GetPointData().PassData(obj.dataset.GetPointData())
        self.dataset.GetCellData().PassData(obj.dataset.GetCellData())
        self.pipeline = utils.OperationNode(
            "copy_data_from",
            parents=[self, obj],
            comment=f"{obj.__class__.__name__}",
            shape="note",
            c="#ccc5b9",
        )
        return self

    def inputdata(self):
        """Obsolete, use `.dataset` instead."""
        colors.printc("WARNING: 'inputdata()' is obsolete, use '.dataset' instead.", c="y")
        return self.dataset

    @property
    def npoints(self):
        """Retrieve the number of points (or vertices)."""
        return self.dataset.GetNumberOfPoints()
    @property
    def nvertices(self):
        """Retrieve the number of vertices (or points)."""
        return self.dataset.GetNumberOfPoints()

    @property
    def ncells(self):
        """Retrieve the number of cells."""
        return self.dataset.GetNumberOfCells()

    def points(self, pts=None):
        """Obsolete, use `self.vertices` or `self.coordinates` instead."""
        if pts is None:  ### getter

            # print the deprecation warning only once, then blank it
            if warnings["points_getter"]:
                colors.printc(warnings["points_getter"], c="y")
                warnings["points_getter"] = ""
            return self.vertices

        else:  ### setter

            if warnings["points_setter"]:
                colors.printc(warnings["points_setter"], c="y")
                warnings["points_setter"] = ""

            pts = np.asarray(pts, dtype=np.float32)

            if pts.ndim == 1:
                ### getter by point index ###################
                # a flat array of indices means "return those points"
                indices = pts.astype(int)
                vpts = self.dataset.GetPoints()
                arr = utils.vtk2numpy(vpts.GetData())
                return arr[indices]  ###########

            ### setter ####################################
            if pts.shape[1] == 2:
                # promote 2D points to 3D with z=0
                pts = np.c_[pts, np.zeros(pts.shape[0], dtype=np.float32)]
            arr = utils.numpy2vtk(pts, dtype=np.float32)

            vpts = self.dataset.GetPoints()
            vpts.SetData(arr)
            vpts.Modified()
            # reset mesh to identity matrix position/rotation:
            self.point_locator = None
            self.cell_locator = None
            self.line_locator = None
            self.transform = LinearTransform()
            return self

    @property
    def cell_centers(self):
        """
        Get the coordinates of the cell centers.

        Examples:
            - [delaunay2d.py](https://github.com/marcomusy/vedo/tree/master/examples/basic/delaunay2d.py)

        See also: `CellCenters()`.
        """
        vcen = vtki.new("CellCenters")
        vcen.CopyArraysOff()
        vcen.SetInputData(self.dataset)
        vcen.Update()
        return utils.vtk2numpy(vcen.GetOutput().GetPoints().GetData())

    @property
    def lines(self):
        """
        Get lines connectivity ids as a python array
        formatted as `[[id0,id1], [id3,id4], ...]`

        See also: `lines_as_flat_array()`.
        """
        # Get cell connectivity ids as a 1D array. The vtk format is:
        # [nids1, id0 ... idn, niids2, id0 ... idm, etc].
        try:
            arr1d = utils.vtk2numpy(self.dataset.GetLines().GetData())
        except AttributeError:
            return np.array([])
        i = 0
        conn = []
        n = len(arr1d)
        # each iteration consumes one cell record: a count followed by ids
        for _ in range(n):
            cell = [arr1d[i + k + 1] for k in range(arr1d[i])]
            conn.append(cell)
            i += arr1d[i] + 1
            if i >= n:
                break

        return conn  # cannot always make a numpy array of it!

    @property
    def lines_as_flat_array(self):
        """
        Get lines connectivity ids as a 1D numpy array.
        Format is e.g. [2, 10,20, 3, 10,11,12, 2, 70,80, ...]

        See also: `lines()`.
        """
        try:
            return utils.vtk2numpy(self.dataset.GetLines().GetData())
        except AttributeError:
            return np.array([])

    def mark_boundaries(self) -> Self:
        """
        Mark cells and vertices if they lie on a boundary.
        A new array called `BoundaryCells` is added to the object.
        """
        mb = vtki.new("MarkBoundaryFilter")
        mb.SetInputData(self.dataset)
        mb.Update()
        self.dataset.DeepCopy(mb.GetOutput())
        self.pipeline = utils.OperationNode("mark_boundaries", parents=[self])
        return self

    def find_cells_in_bounds(self, xbounds=(), ybounds=(), zbounds=()) -> np.ndarray:
        """
        Find cells that are within the specified bounds.
        """
        # xbounds may also be a vedo object: use its bounds
        try:
            xbounds = list(xbounds.bounds())
        except AttributeError:
            pass

        if len(xbounds) == 6:
            bnds = xbounds
        else:
            # start from the full bounds, then override per axis
            bnds = list(self.bounds())
            if len(xbounds) == 2:
                bnds[0] = xbounds[0]
                bnds[1] = xbounds[1]
            if len(ybounds) == 2:
                bnds[2] = ybounds[0]
                bnds[3] = ybounds[1]
            if len(zbounds) == 2:
                bnds[4] = zbounds[0]
                bnds[5] = zbounds[1]

        cell_ids = vtki.vtkIdList()
        # build (and cache) the cell locator lazily
        if not self.cell_locator:
            self.cell_locator = vtki.new("CellTreeLocator")
            self.cell_locator.SetDataSet(self.dataset)
            self.cell_locator.BuildLocator()
        self.cell_locator.FindCellsWithinBounds(bnds, cell_ids)
        cids = []
        for i in range(cell_ids.GetNumberOfIds()):
            cid = cell_ids.GetId(i)
            cids.append(cid)
        return np.array(cids)

    def find_cells_along_line(self, p0, p1, tol=0.001) -> np.ndarray:
        """
        Find cells that are intersected by a line segment.
        """
        cell_ids = vtki.vtkIdList()
        if not self.cell_locator:
            self.cell_locator = vtki.new("CellTreeLocator")
            self.cell_locator.SetDataSet(self.dataset)
            self.cell_locator.BuildLocator()
        self.cell_locator.FindCellsAlongLine(p0, p1, tol, cell_ids)
        cids = []
        for i in range(cell_ids.GetNumberOfIds()):
            cid = cell_ids.GetId(i)
            cids.append(cid)
        return np.array(cids)

    def find_cells_along_plane(self, origin, normal, tol=0.001) -> np.ndarray:
        """
        Find cells that are intersected by a plane.
        """
        cell_ids = vtki.vtkIdList()
        if not self.cell_locator:
            self.cell_locator = vtki.new("CellTreeLocator")
            self.cell_locator.SetDataSet(self.dataset)
            self.cell_locator.BuildLocator()
        self.cell_locator.FindCellsAlongPlane(origin, normal, tol, cell_ids)
        cids = []
        for i in range(cell_ids.GetNumberOfIds()):
            cid = cell_ids.GetId(i)
            cids.append(cid)
        return np.array(cids)

    def keep_cell_types(self, types=()) -> Self:
        """
        Extract cells of a specific type.

        Check the VTK cell types here:
        https://vtk.org/doc/nightly/html/vtkCellType_8h.html
        """
        fe = vtki.new("ExtractCellsByType")
        fe.SetInputData(self.dataset)
        for t in types:
            try:
                # accept both integer VTK cell type ids and their string names
                if utils.is_integer(t):
                    it = t
                else:
                    it = vtki.cell_types[t.upper()]
            except KeyError:
                vedo.logger.error(f"Cell type '{t}' not recognized")
                continue
            fe.AddCellType(it)
        fe.Update()
        self._update(fe.GetOutput())
        return self

    def map_cells_to_points(self, arrays=(), move=False) -> Self:
        """
        Interpolate cell data (i.e., data specified per cell or face)
        into point data (i.e., data specified at each vertex).
        The method of transformation is based on averaging the data values
        of all cells using a particular point.

        A custom list of arrays to be mapped can be passed in input.

        Set `move=True` to delete the original `celldata` array.
        """
        c2p = vtki.new("CellDataToPointData")
        c2p.SetInputData(self.dataset)
        if not move:
            c2p.PassCellDataOn()
        if arrays:
            c2p.ClearCellDataArrays()
            c2p.ProcessAllArraysOff()
            for arr in arrays:
                c2p.AddCellDataArray(arr)
        else:
            c2p.ProcessAllArraysOn()
        c2p.Update()
        self._update(c2p.GetOutput(), reset_locators=False)
        self.mapper.SetScalarModeToUsePointData()
        self.pipeline = utils.OperationNode("map_cells_to_points", parents=[self])
        return self

    @property
    def vertices(self):
        """Return the vertices (points) coordinates."""
        try:
            # for polydata and unstructured grid
            varr = self.dataset.GetPoints().GetData()
        except (AttributeError, TypeError):
            try:
                # for RectilinearGrid, StructuredGrid
                vpts = vtki.vtkPoints()
                self.dataset.GetPoints(vpts)
                varr = vpts.GetData()
            except (AttributeError, TypeError):
                try:
                    # for ImageData
                    v2p = vtki.new("ImageToPoints")
                    v2p.SetInputData(self.dataset)
                    v2p.Update()
                    varr = v2p.GetOutput().GetPoints().GetData()
                except AttributeError:
                    return np.array([])

        return utils.vtk2numpy(varr)

    # setter
    @vertices.setter
    def vertices(self, pts):
        """Set vertices (points) coordinates."""
        pts = utils.make3d(pts)
        arr = utils.numpy2vtk(pts, dtype=np.float32)
        try:
            vpts = self.dataset.GetPoints()
            vpts.SetData(arr)
            vpts.Modified()
        except (AttributeError, TypeError):
            vedo.logger.error(f"Cannot set vertices for {type(self)}")
            # NOTE(review): return value of a property setter is ignored by Python
            return self
        # reset mesh to identity matrix position/rotation:
        self.point_locator = None
        self.cell_locator = None
        self.line_locator = None
        self.transform = LinearTransform()

    @property
    def coordinates(self):
        """Return the vertices (points) coordinates. Same as `vertices`."""
        return self.vertices

    @coordinates.setter
    def coordinates(self, pts):
        """Set vertices (points) coordinates. Same as `vertices`."""
        self.vertices = pts

    @property
    def cells_as_flat_array(self):
        """
        Get cell connectivity ids as a 1D numpy array.
        Format is e.g. [3, 10,20,30 4, 10,11,12,13 ...]
        """
        try:
            # valid for unstructured grid
            arr1d = utils.vtk2numpy(self.dataset.GetCells().GetData())
        except AttributeError:
            # valid for polydata
            arr1d = utils.vtk2numpy(self.dataset.GetPolys().GetData())
        return arr1d

    @property
    def cells(self):
        """
        Get the cells connectivity ids as a numpy array.

        The output format is: `[[id0 ... idn], [id0 ... idm], etc]`.
        """
        try:
            # valid for unstructured grid
            arr1d = utils.vtk2numpy(self.dataset.GetCells().GetData())
        except AttributeError:
            try:
                # valid for polydata
                arr1d = utils.vtk2numpy(self.dataset.GetPolys().GetData())
            except AttributeError:
                vedo.logger.warning(f"Cannot get cells for {type(self)}")
                return np.array([])

        # Get cell connectivity ids as a 1D array. vtk format is:
        # [nids1, id0 ... idn, niids2, id0 ... idm, etc].
        i = 0
        conn = []
        n = len(arr1d)
        if n:
            while True:
                # arr1d[i] is the id count of the current cell record
                cell = [arr1d[i + k] for k in range(1, arr1d[i] + 1)]
                conn.append(cell)
                i += arr1d[i] + 1
                if i >= n:
                    break
        return conn
    def map_points_to_cells(self, arrays=(), move=False) -> Self:
        """
        Interpolate point data (i.e., data specified per point or vertex)
        into cell data (i.e., data specified per cell).
        The method of transformation is based on averaging the data values
        of all points defining a particular cell.

        A custom list of arrays to be mapped can be passed in input.

        Set `move=True` to delete the original `pointdata` array.

        Examples:
            - [mesh_map2cell.py](https://github.com/marcomusy/vedo/tree/master/examples/basic/mesh_map2cell.py)
        """
        p2c = vtki.new("PointDataToCellData")
        p2c.SetInputData(self.dataset)
        if not move:
            p2c.PassPointDataOn()
        if arrays:
            p2c.ClearPointDataArrays()
            p2c.ProcessAllArraysOff()
            for arr in arrays:
                p2c.AddPointDataArray(arr)
        else:
            p2c.ProcessAllArraysOn()
        p2c.Update()
        self._update(p2c.GetOutput(), reset_locators=False)
        self.mapper.SetScalarModeToUseCellData()
        self.pipeline = utils.OperationNode("map_points_to_cells", parents=[self])
        return self

    def resample_data_from(self, source, tol=None, categorical=False) -> Self:
        """
        Resample point and cell data from another dataset.
        The output has the same structure but its point data have
        the resampled values from target.

        Use `tol` to set the tolerance used to compute whether
        a point in the source is in a cell of the current object.
        Points without resampled values, and their cells, are marked as blank.
        If the data is categorical, then the resulting data will be determined
        by a nearest neighbor interpolation scheme.

        Example:
        ```python
        from vedo import *
        m1 = Mesh(dataurl+'bunny.obj')#.add_gaussian_noise(0.1)
        pts = m1.vertices
        ces = m1.cell_centers
        m1.pointdata["xvalues"] = np.power(pts[:,0], 3)
        m1.celldata["yvalues"] = np.power(ces[:,1], 3)
        m2 = Mesh(dataurl+'bunny.obj')
        m2.resample_data_from(m1)
        # print(m2.pointdata["xvalues"])
        show(m1, m2 , N=2, axes=1)
        ```
        """
        rs = vtki.new("ResampleWithDataSet")
        rs.SetInputData(self.dataset)
        rs.SetSourceData(source.dataset)

        rs.SetPassPointArrays(True)
        rs.SetPassCellArrays(True)
        rs.SetPassFieldArrays(True)
        rs.SetCategoricalData(categorical)

        # auto-computed tolerance unless the caller provided one explicitly
        rs.SetComputeTolerance(True)
        if tol:
            rs.SetComputeTolerance(False)
            rs.SetTolerance(tol)
        rs.Update()
        self._update(rs.GetOutput(), reset_locators=False)
        self.pipeline = utils.OperationNode(
            "resample_data_from",
            comment=f"{source.__class__.__name__}",
            parents=[self, source],
        )
        return self

    def interpolate_data_from(
        self,
        source,
        radius=None,
        n=None,
        kernel="shepard",
        exclude=("Normals",),
        on="points",
        null_strategy=1,
        null_value=0,
    ) -> Self:
        """
        Interpolate over source to port its data onto the current object using various kernels.

        If n (number of closest points to use) is set then radius value is ignored.

        Check out also:
            `probe()` which in many cases can be faster.

        Arguments:
            kernel : (str)
                available kernels are [shepard, gaussian, linear]
            null_strategy : (int)
                specify a strategy to use when encountering a "null" point
                during the interpolation process. Null points occur when the local neighborhood
                (of nearby points to interpolate from) is empty.

                - Case 0: an output array is created that marks points
                  as being valid (=1) or null (invalid =0), and the null_value is set as well
                - Case 1: the output data value(s) are set to the provided null_value
                - Case 2: simply use the closest point to perform the interpolation.
            null_value : (float)
                see above.

        Examples:
            - [interpolate_scalar1.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/interpolate_scalar1.py)
            - [interpolate_scalar3.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/interpolate_scalar3.py)
            - [interpolate_scalar4.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/interpolate_scalar4.py)
            - [image_probe.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/image_probe.py)

        ![](https://vedo.embl.es/images/advanced/interpolateMeshArray.png)
        """
        if radius is None and not n:
            vedo.logger.error("in interpolate_data_from(): please set either radius or n")
            raise RuntimeError

        if on == "points":
            points = source.dataset
        elif on == "cells":
            # convert source cell data to point data first
            c2p = vtki.new("CellDataToPointData")
            c2p.SetInputData(source.dataset)
            c2p.Update()
            points = c2p.GetOutput()
        else:
            vedo.logger.error("in interpolate_data_from(), on must be on points or cells")
            raise RuntimeError()

        locator = vtki.new("PointLocator")
        locator.SetDataSet(points)
        locator.BuildLocator()

        if kernel.lower() == "shepard":
            kern = vtki.new("ShepardKernel")
            kern.SetPowerParameter(2)
        elif kernel.lower() == "gaussian":
            kern = vtki.new("GaussianKernel")
            kern.SetSharpness(2)
        elif kernel.lower() == "linear":
            kern = vtki.new("LinearKernel")
        else:
            vedo.logger.error("available kernels are: [shepard, gaussian, linear]")
            raise RuntimeError()

        # n takes precedence over radius (see docstring)
        if n:
            kern.SetNumberOfPoints(n)
            kern.SetKernelFootprintToNClosest()
        else:
            kern.SetRadius(radius)
            kern.SetKernelFootprintToRadius()

        interpolator = vtki.new("PointInterpolator")
        interpolator.SetInputData(self.dataset)
        interpolator.SetSourceData(points)
        interpolator.SetKernel(kern)
        interpolator.SetLocator(locator)
        interpolator.PassFieldArraysOn()
        interpolator.SetNullPointsStrategy(null_strategy)
        interpolator.SetNullValue(null_value)
        interpolator.SetValidPointsMaskArrayName("ValidPointMask")
        for ex in exclude:
            interpolator.AddExcludedArray(ex)
        interpolator.Update()

        if on == "cells":
            # map the interpolated point data back onto cells
            p2c = vtki.new("PointDataToCellData")
            p2c.SetInputData(interpolator.GetOutput())
            p2c.Update()
            cpoly = p2c.GetOutput()
        else:
            cpoly = interpolator.GetOutput()

        self._update(cpoly, reset_locators=False)

        self.pipeline = utils.OperationNode("interpolate_data_from", parents=[self, source])
        return self

    def add_ids(self) -> Self:
        """
        Generate point and cell ids arrays.

        Two new arrays are added to the mesh: `PointID` and `CellID`.
        """
        ids = vtki.new("IdFilter")
        ids.SetInputData(self.dataset)
        ids.PointIdsOn()
        ids.CellIdsOn()
        ids.FieldDataOff()
        ids.SetPointIdsArrayName("PointID")
        ids.SetCellIdsArrayName("CellID")
        ids.Update()
        self._update(ids.GetOutput(), reset_locators=False)
        self.pipeline = utils.OperationNode("add_ids", parents=[self])
        return self

    def gradient(self, input_array=None, on="points", fast=False) -> np.ndarray:
        """
        Compute and return the gradiend of the active scalar field as a numpy array.

        Arguments:
            input_array : (str)
                array of the scalars to compute the gradient,
                if None the current active array is selected
            on : (str)
                compute either on 'points' or 'cells' data
            fast : (bool)
                if True, will use a less accurate algorithm
                that performs fewer derivative calculations (and is therefore faster).

        Examples:
            - [isolines.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/isolines.py)

        ![](https://user-images.githubusercontent.com/32848391/72433087-f00a8780-3798-11ea-9778-991f0abeca70.png)
        """
        gra = vtki.new("GradientFilter")
        if on.startswith("p"):
            varr = self.dataset.GetPointData()
            tp = vtki.vtkDataObject.FIELD_ASSOCIATION_POINTS
        elif on.startswith("c"):
            varr = self.dataset.GetCellData()
            tp = vtki.vtkDataObject.FIELD_ASSOCIATION_CELLS
        else:
            vedo.logger.error(f"in gradient: unknown option {on}")
            raise RuntimeError

        if input_array is None:
            if varr.GetScalars():
                input_array = varr.GetScalars().GetName()
            else:
                vedo.logger.error(f"in gradient: no scalars found for {on}")
                raise RuntimeError

        gra.SetInputData(self.dataset)
        gra.SetInputScalars(tp, input_array)
        gra.SetResultArrayName("Gradient")
        gra.SetFasterApproximation(fast)
        gra.ComputeDivergenceOff()
        gra.ComputeVorticityOff()
        gra.ComputeGradientOn()
        gra.Update()
        if on.startswith("p"):
            gvecs = utils.vtk2numpy(gra.GetOutput().GetPointData().GetArray("Gradient"))
        else:
            gvecs = utils.vtk2numpy(gra.GetOutput().GetCellData().GetArray("Gradient"))
        return gvecs

    def divergence(self, array_name=None, on="points", fast=False) -> np.ndarray:
        """
        Compute and return the divergence of a vector field as a numpy array.
1167 1168 Arguments: 1169 array_name : (str) 1170 name of the array of vectors to compute the divergence, 1171 if None the current active array is selected 1172 on : (str) 1173 compute either on 'points' or 'cells' data 1174 fast : (bool) 1175 if True, will use a less accurate algorithm 1176 that performs fewer derivative calculations (and is therefore faster). 1177 """ 1178 div = vtki.new("GradientFilter") 1179 if on.startswith("p"): 1180 varr = self.dataset.GetPointData() 1181 tp = vtki.vtkDataObject.FIELD_ASSOCIATION_POINTS 1182 elif on.startswith("c"): 1183 varr = self.dataset.GetCellData() 1184 tp = vtki.vtkDataObject.FIELD_ASSOCIATION_CELLS 1185 else: 1186 vedo.logger.error(f"in divergence(): unknown option {on}") 1187 raise RuntimeError 1188 1189 if array_name is None: 1190 if varr.GetVectors(): 1191 array_name = varr.GetVectors().GetName() 1192 else: 1193 vedo.logger.error(f"in divergence(): no vectors found for {on}") 1194 raise RuntimeError 1195 1196 div.SetInputData(self.dataset) 1197 div.SetInputScalars(tp, array_name) 1198 div.ComputeDivergenceOn() 1199 div.ComputeGradientOff() 1200 div.ComputeVorticityOff() 1201 div.SetDivergenceArrayName("Divergence") 1202 div.SetFasterApproximation(fast) 1203 div.Update() 1204 if on.startswith("p"): 1205 dvecs = utils.vtk2numpy(div.GetOutput().GetPointData().GetArray("Divergence")) 1206 else: 1207 dvecs = utils.vtk2numpy(div.GetOutput().GetCellData().GetArray("Divergence")) 1208 return dvecs 1209 1210 def vorticity(self, array_name=None, on="points", fast=False) -> np.ndarray: 1211 """ 1212 Compute and return the vorticity of a vector field as a numpy array. 1213 1214 Arguments: 1215 array_name : (str) 1216 name of the array to compute the vorticity, 1217 if None the current active array is selected 1218 on : (str) 1219 compute either on 'points' or 'cells' data 1220 fast : (bool) 1221 if True, will use a less accurate algorithm 1222 that performs fewer derivative calculations (and is therefore faster). 
1223 """ 1224 vort = vtki.new("GradientFilter") 1225 if on.startswith("p"): 1226 varr = self.dataset.GetPointData() 1227 tp = vtki.vtkDataObject.FIELD_ASSOCIATION_POINTS 1228 elif on.startswith("c"): 1229 varr = self.dataset.GetCellData() 1230 tp = vtki.vtkDataObject.FIELD_ASSOCIATION_CELLS 1231 else: 1232 vedo.logger.error(f"in vorticity(): unknown option {on}") 1233 raise RuntimeError 1234 1235 if array_name is None: 1236 if varr.GetVectors(): 1237 array_name = varr.GetVectors().GetName() 1238 else: 1239 vedo.logger.error(f"in vorticity(): no vectors found for {on}") 1240 raise RuntimeError 1241 1242 vort.SetInputData(self.dataset) 1243 vort.SetInputScalars(tp, array_name) 1244 vort.ComputeDivergenceOff() 1245 vort.ComputeGradientOff() 1246 vort.ComputeVorticityOn() 1247 vort.SetVorticityArrayName("Vorticity") 1248 vort.SetFasterApproximation(fast) 1249 vort.Update() 1250 if on.startswith("p"): 1251 vvecs = utils.vtk2numpy(vort.GetOutput().GetPointData().GetArray("Vorticity")) 1252 else: 1253 vvecs = utils.vtk2numpy(vort.GetOutput().GetCellData().GetArray("Vorticity")) 1254 return vvecs 1255 1256 def probe(self, source) -> Self: 1257 """ 1258 Takes a data set and probes its scalars at the specified points in space. 1259 1260 Note that a mask is also output with valid/invalid points which can be accessed 1261 with `mesh.pointdata['ValidPointMask']`. 
1262 1263 Check out also: 1264 `interpolate_data_from()` 1265 1266 Examples: 1267 - [probe_points.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/probe_points.py) 1268 1269 ![](https://vedo.embl.es/images/volumetric/probePoints.png) 1270 """ 1271 probe_filter = vtki.new("ProbeFilter") 1272 probe_filter.SetSourceData(source.dataset) 1273 probe_filter.SetInputData(self.dataset) 1274 probe_filter.Update() 1275 self._update(probe_filter.GetOutput(), reset_locators=False) 1276 self.pipeline = utils.OperationNode("probe", parents=[self, source]) 1277 self.pointdata.rename("vtkValidPointMask", "ValidPointMask") 1278 return self 1279 1280 def compute_cell_size(self) -> Self: 1281 """ 1282 Add to this object a cell data array 1283 containing the area, volume and edge length 1284 of the cells (when applicable to the object type). 1285 1286 Array names are: `Area`, `Volume`, `Length`. 1287 """ 1288 csf = vtki.new("CellSizeFilter") 1289 csf.SetInputData(self.dataset) 1290 csf.SetComputeArea(1) 1291 csf.SetComputeVolume(1) 1292 csf.SetComputeLength(1) 1293 csf.SetComputeVertexCount(0) 1294 csf.SetAreaArrayName("Area") 1295 csf.SetVolumeArrayName("Volume") 1296 csf.SetLengthArrayName("Length") 1297 csf.Update() 1298 self._update(csf.GetOutput(), reset_locators=False) 1299 return self 1300 1301 def generate_random_data(self) -> Self: 1302 """Fill a dataset with random attributes""" 1303 gen = vtki.new("RandomAttributeGenerator") 1304 gen.SetInputData(self.dataset) 1305 gen.GenerateAllDataOn() 1306 gen.SetDataTypeToFloat() 1307 gen.GeneratePointNormalsOff() 1308 gen.GeneratePointTensorsOn() 1309 gen.GenerateCellScalarsOn() 1310 gen.Update() 1311 self._update(gen.GetOutput(), reset_locators=False) 1312 self.pipeline = utils.OperationNode("generate_random_data", parents=[self]) 1313 return self 1314 1315 def integrate_data(self) -> dict: 1316 """ 1317 Integrate point and cell data arrays while computing length, 1318 area or volume of the domain. 
It works for 1D, 2D or 3D cells. 1319 1320 For volumetric datasets, this filter ignores all but 3D cells. 1321 It will not compute the volume contained in a closed surface. 1322 1323 Returns a dictionary with keys: `pointdata`, `celldata`, `metadata`, 1324 which contain the integration result for the corresponding attributes. 1325 1326 Examples: 1327 ```python 1328 from vedo import * 1329 surf = Sphere(res=100) 1330 surf.pointdata['scalars'] = np.ones(surf.npoints) 1331 data = surf.integrate_data() 1332 print(data['pointdata']['scalars'], "is equal to 4pi", 4*np.pi) 1333 ``` 1334 1335 ```python 1336 from vedo import * 1337 1338 xcoords1 = np.arange(0, 2.2, 0.2) 1339 xcoords2 = sqrt(np.arange(0, 4.2, 0.2)) 1340 1341 ycoords = np.arange(0, 1.2, 0.2) 1342 1343 surf1 = Grid(s=(xcoords1, ycoords)).rotate_y(-45).lw(2) 1344 surf2 = Grid(s=(xcoords2, ycoords)).rotate_y(-45).lw(2) 1345 1346 surf1.pointdata['scalars'] = surf1.vertices[:,2] 1347 surf2.pointdata['scalars'] = surf2.vertices[:,2] 1348 1349 data1 = surf1.integrate_data() 1350 data2 = surf2.integrate_data() 1351 1352 print(data1['pointdata']['scalars'], 1353 "is equal to", 1354 data2['pointdata']['scalars'], 1355 "even if the grids are different!", 1356 "(= the volume under the surface)" 1357 ) 1358 show(surf1, surf2, N=2, axes=1).close() 1359 ``` 1360 """ 1361 vinteg = vtki.new("IntegrateAttributes") 1362 vinteg.SetInputData(self.dataset) 1363 vinteg.Update() 1364 ugrid = vedo.UnstructuredGrid(vinteg.GetOutput()) 1365 data = dict( 1366 pointdata=ugrid.pointdata.todict(), 1367 celldata=ugrid.celldata.todict(), 1368 metadata=ugrid.metadata.todict(), 1369 ) 1370 return data 1371 1372 def write(self, filename, binary=True) -> None: 1373 """Write object to file.""" 1374 out = vedo.file_io.write(self, filename, binary) 1375 out.pipeline = utils.OperationNode( 1376 "write", parents=[self], comment=filename[:15], shape="folder", c="#8a817c" 1377 ) 1378 1379 def tomesh(self, bounds=(), shrink=0) -> "vedo.Mesh": 1380 """ 
1381 Extract boundary geometry from dataset (or convert data to polygonal type). 1382 1383 Two new arrays are added to the mesh: `OriginalCellIds` and `OriginalPointIds` 1384 to keep track of the original mesh elements. 1385 1386 Arguments: 1387 bounds : (list) 1388 specify a sub-region to extract 1389 shrink : (float) 1390 shrink the cells to a fraction of their original size 1391 """ 1392 geo = vtki.new("GeometryFilter") 1393 1394 if shrink: 1395 sf = vtki.new("ShrinkFilter") 1396 sf.SetInputData(self.dataset) 1397 sf.SetShrinkFactor(shrink) 1398 sf.Update() 1399 geo.SetInputData(sf.GetOutput()) 1400 else: 1401 geo.SetInputData(self.dataset) 1402 1403 geo.SetPassThroughCellIds(1) 1404 geo.SetPassThroughPointIds(1) 1405 geo.SetOriginalCellIdsName("OriginalCellIds") 1406 geo.SetOriginalPointIdsName("OriginalPointIds") 1407 geo.SetNonlinearSubdivisionLevel(1) 1408 # geo.MergingOff() # crashes on StructuredGrids 1409 if bounds: 1410 geo.SetExtent(bounds) 1411 geo.ExtentClippingOn() 1412 geo.Update() 1413 msh = vedo.mesh.Mesh(geo.GetOutput()) 1414 msh.pipeline = utils.OperationNode("tomesh", parents=[self], c="#9e2a2b") 1415 return msh 1416 1417 def signed_distance(self, dims=(20, 20, 20), bounds=None, invert=False, max_radius=None) -> "vedo.Volume": 1418 """ 1419 Compute the `Volume` object whose voxels contains the signed distance from 1420 the object. The calling object must have "Normals" defined. 1421 1422 Arguments: 1423 bounds : (list, actor) 1424 bounding box sizes 1425 dims : (list) 1426 dimensions (nr. of voxels) of the output volume. 
1427 invert : (bool) 1428 flip the sign 1429 max_radius : (float) 1430 specify how far out to propagate distance calculation 1431 1432 Examples: 1433 - [distance2mesh.py](https://github.com/marcomusy/vedo/blob/master/examples/basic/distance2mesh.py) 1434 1435 ![](https://vedo.embl.es/images/basic/distance2mesh.png) 1436 """ 1437 if bounds is None: 1438 bounds = self.bounds() 1439 if max_radius is None: 1440 max_radius = self.diagonal_size() / 2 1441 dist = vtki.new("SignedDistance") 1442 dist.SetInputData(self.dataset) 1443 dist.SetRadius(max_radius) 1444 dist.SetBounds(bounds) 1445 dist.SetDimensions(dims) 1446 dist.Update() 1447 img = dist.GetOutput() 1448 if invert: 1449 mat = vtki.new("ImageMathematics") 1450 mat.SetInput1Data(img) 1451 mat.SetOperationToMultiplyByK() 1452 mat.SetConstantK(-1) 1453 mat.Update() 1454 img = mat.GetOutput() 1455 1456 vol = vedo.Volume(img) 1457 vol.name = "SignedDistanceVolume" 1458 vol.pipeline = utils.OperationNode( 1459 "signed_distance", 1460 parents=[self], 1461 comment=f"dims={tuple(vol.dimensions())}", 1462 c="#e9c46a:#0096c7", 1463 ) 1464 return vol 1465 1466 def unsigned_distance( 1467 self, dims=(25,25,25), bounds=(), max_radius=0, cap_value=0) -> "vedo.Volume": 1468 """ 1469 Compute the `Volume` object whose voxels contains the unsigned distance. 
1470 """ 1471 dist = vtki.new("UnsignedDistance") 1472 dist.SetInputData(self.dataset) 1473 dist.SetDimensions(dims) 1474 1475 if len(bounds) == 6: 1476 dist.SetBounds(bounds) 1477 # elif bounds == "auto": 1478 # dist.AdjustBoundsOn() 1479 else: 1480 dist.SetBounds(self.bounds()) 1481 if not max_radius: 1482 max_radius = self.diagonal_size() / 10 1483 dist.SetRadius(max_radius) 1484 1485 if self.point_locator: 1486 dist.SetLocator(self.point_locator) 1487 1488 if cap_value is not None: 1489 dist.CappingOn() 1490 dist.SetCapValue(cap_value) 1491 dist.SetOutputScalarTypeToFloat() 1492 dist.Update() 1493 vol = vedo.Volume(dist.GetOutput()) 1494 vol.name = "UnsignedDistanceVolume" 1495 vol.pipeline = utils.OperationNode( 1496 "unsigned_distance", parents=[self], c="#e9c46a:#0096c7") 1497 return vol 1498 1499 def smooth_data(self, 1500 niter=10, relaxation_factor=0.1, strategy=0, mask=None, 1501 exclude=("Normals", "TextureCoordinates"), 1502 ) -> Self: 1503 """ 1504 Smooth point attribute data using distance weighted Laplacian kernel. 1505 1506 The effect is to blur regions of high variation and emphasize low variation regions. 
1507 1508 Arguments: 1509 niter : (int) 1510 number of iterations 1511 relaxation_factor : (float) 1512 relaxation factor controlling the amount of Laplacian smoothing applied 1513 strategy : (int) 1514 strategy to use for Laplacian smoothing 1515 - 0: use all points, all point data attributes are smoothed 1516 - 1: smooth all point attribute data except those on the boundary 1517 - 2: only point data connected to a boundary point are smoothed 1518 mask : (str, np.ndarray) 1519 array to be used as a mask (ignore then the strategy keyword) 1520 exclude : (list) 1521 list of arrays to be excluded from smoothing 1522 """ 1523 try: 1524 saf = vtki.new("AttributeSmoothingFilter") 1525 except: 1526 vedo.logger.error("smooth_data() only avaialble in VTK>=9.3.0") 1527 return self 1528 saf.SetInputData(self.dataset) 1529 saf.SetRelaxationFactor(relaxation_factor) 1530 saf.SetNumberOfIterations(niter) 1531 1532 for ex in exclude: 1533 saf.AddExcludedArray(ex) 1534 1535 saf.SetWeightsTypeToDistance2() 1536 1537 saf.SetSmoothingStrategy(strategy) 1538 if mask is not None: 1539 saf.SetSmoothingStrategyToSmoothingMask() 1540 if isinstance(mask, str): 1541 mask_ = self.dataset.GetPointData().GetArray(mask) 1542 if not mask_: 1543 vedo.logger.error(f"smooth_data(): mask array {mask} not found") 1544 return self 1545 mask_array = vtki.vtkUnsignedCharArray() 1546 mask_array.ShallowCopy(mask_) 1547 mask_array.SetName(mask_.GetName()) 1548 else: 1549 mask_array = utils.numpy2vtk(mask, dtype=np.uint8) 1550 saf.SetSmoothingMask(mask_array) 1551 1552 saf.Update() 1553 1554 self._update(saf.GetOutput()) 1555 self.pipeline = utils.OperationNode( 1556 "smooth_data", comment=f"strategy {strategy}", parents=[self], c="#9e2a2b" 1557 ) 1558 return self 1559 1560 def compute_streamlines( 1561 self, 1562 seeds: Any, 1563 integrator="rk4", 1564 direction="forward", 1565 initial_step_size=None, 1566 max_propagation=None, 1567 max_steps=10000, 1568 step_length=0, 1569 surface_constrained=False, 1570 
compute_vorticity=False, 1571 ) -> Union["vedo.Lines", None]: 1572 """ 1573 Integrate a vector field to generate streamlines. 1574 1575 Arguments: 1576 seeds : (Mesh, Points, list) 1577 starting points of the streamlines 1578 integrator : (str) 1579 type of integration method to be used: 1580 - "rk2" (Runge-Kutta 2) 1581 - "rk4" (Runge-Kutta 4) 1582 - "rk45" (Runge-Kutta 45) 1583 direction : (str) 1584 direction of integration, either "forward", "backward" or "both" 1585 initial_step_size : (float) 1586 initial step size used for line integration 1587 max_propagation : (float) 1588 maximum length of a streamline expressed in absolute units 1589 max_steps : (int) 1590 maximum number of steps for a streamline 1591 step_length : (float) 1592 maximum length of a step expressed in absolute units 1593 surface_constrained : (bool) 1594 whether to stop integrating when the streamline leaves the surface 1595 compute_vorticity : (bool) 1596 whether to compute the vorticity at each streamline point 1597 """ 1598 b = self.dataset.GetBounds() 1599 size = (b[5]-b[4] + b[3]-b[2] + b[1]-b[0]) / 3 1600 if initial_step_size is None: 1601 initial_step_size = size / 1000.0 1602 1603 if max_propagation is None: 1604 max_propagation = size * 2 1605 1606 if utils.is_sequence(seeds): 1607 seeds = vedo.Points(seeds) 1608 1609 sti = vtki.new("StreamTracer") 1610 sti.SetSourceData(seeds.dataset) 1611 if isinstance(self, vedo.RectilinearGrid): 1612 sti.SetInputData(vedo.UnstructuredGrid(self.dataset).dataset) 1613 else: 1614 sti.SetInputDataObject(self.dataset) 1615 1616 sti.SetInitialIntegrationStep(initial_step_size) 1617 sti.SetComputeVorticity(compute_vorticity) 1618 sti.SetMaximumNumberOfSteps(max_steps) 1619 sti.SetMaximumPropagation(max_propagation) 1620 sti.SetSurfaceStreamlines(surface_constrained) 1621 if step_length: 1622 sti.SetMaximumIntegrationStep(step_length) 1623 1624 if "for" in direction: 1625 sti.SetIntegrationDirectionToForward() 1626 elif "back" in direction: 1627 
###############################################################################
class PointAlgorithms(CommonAlgorithms):
    """Methods for point clouds."""

    def apply_transform(self, LT: Any, deep_copy=True) -> Self:
        """
        Apply a linear or non-linear transformation to the mesh polygonal data.

        Example:
        ```python
        from vedo import Cube, show, settings
        settings.use_parallel_projection = True
        c1 = Cube().rotate_z(25).pos(2,1).mirror().alpha(0.5)
        T = c1.transform  # rotate by 5 degrees, place at (2,1)
        c2 = Cube().c('red4').wireframe().lw(10).lighting('off')
        c2.apply_transform(T)
        show(c1, c2, "The 2 cubes should overlap!", axes=1).close()
        ```

        ![](https://vedo.embl.es/images/feats/apply_transform.png)
        """
        # nothing to transform
        if self.dataset.GetNumberOfPoints() == 0:
            return self

        # normalize the input to a vtk transform + linearity flag
        if isinstance(LT, LinearTransform):
            LT_is_linear = True
            tr = LT.T
            if LT.is_identity():
                return self
        elif isinstance(LT, (vtki.vtkMatrix4x4, vtki.vtkLinearTransform)) or utils.is_sequence(LT):
            LT_is_linear = True
            LT = LinearTransform(LT)
            tr = LT.T
            if LT.is_identity():
                return self
        elif isinstance(LT, NonLinearTransform):
            LT_is_linear = False
            tr = LT.T
            self.transform = LT  # reset
        elif isinstance(LT, vtki.vtkThinPlateSplineTransform):
            LT_is_linear = False
            tr = LT
            self.transform = NonLinearTransform(LT)  # reset
        else:
            vedo.logger.error(f"apply_transform(), unknown input type:\n{LT}")
            return self

        ################
        if LT_is_linear:
            try:
                # self.transform might still not be linear
                self.transform.concatenate(LT)
            except AttributeError:
                # in that case reset it
                self.transform = LinearTransform()

        ################
        # pick the right vtk filter for the dataset type
        if isinstance(self.dataset, vtki.vtkPolyData):
            tp = vtki.new("TransformPolyDataFilter")
        elif isinstance(self.dataset, vtki.vtkUnstructuredGrid):
            tp = vtki.new("TransformFilter")
            tp.TransformAllInputVectorsOn()
        # elif isinstance(self.dataset, vtki.vtkImageData):
        #     tp = vtki.new("ImageReslice")
        #     tp.SetInterpolationModeToCubic()
        #     tp.SetResliceTransform(tr)
        else:
            vedo.logger.error(f"apply_transform(), unknown input type: {[self.dataset]}")
            return self

        tp.SetTransform(tr)
        tp.SetInputData(self.dataset)
        tp.Update()
        out = tp.GetOutput()

        if deep_copy:
            self.dataset.DeepCopy(out)
        else:
            self.dataset.ShallowCopy(out)

        # the cached locators refer to the old geometry: reset them
        self.point_locator = None
        self.cell_locator = None
        self.line_locator = None
        return self
vedo.logger.error(f"apply_transform(), unknown input type: {[self.dataset]}") 1725 return self 1726 1727 tp.SetTransform(tr) 1728 tp.SetInputData(self.dataset) 1729 tp.Update() 1730 out = tp.GetOutput() 1731 1732 if deep_copy: 1733 self.dataset.DeepCopy(out) 1734 else: 1735 self.dataset.ShallowCopy(out) 1736 1737 # reset the locators 1738 self.point_locator = None 1739 self.cell_locator = None 1740 self.line_locator = None 1741 return self 1742 1743 def apply_transform_from_actor(self) -> LinearTransform: 1744 """ 1745 Apply the current transformation of the actor to the data. 1746 Useful when manually moving an actor (eg. when pressing "a"). 1747 Returns the `LinearTransform` object. 1748 1749 Note that this method is automatically called when the window is closed, 1750 or the interactor style is changed. 1751 """ 1752 M = self.actor.GetMatrix() 1753 self.apply_transform(M) 1754 iden = vtki.vtkMatrix4x4() 1755 self.actor.PokeMatrix(iden) 1756 return LinearTransform(M) 1757 1758 def pos(self, x=None, y=None, z=None) -> Self: 1759 """Set/Get object position.""" 1760 if x is None: # get functionality 1761 return self.transform.position 1762 1763 if z is None and y is None: # assume x is of the form (x,y,z) 1764 if len(x) == 3: 1765 x, y, z = x 1766 else: 1767 x, y = x 1768 z = 0 1769 elif z is None: # assume x,y is of the form x, y 1770 z = 0 1771 1772 q = self.transform.position 1773 delta = [x, y, z] - q 1774 if delta[0] == delta[1] == delta[2] == 0: 1775 return self 1776 LT = LinearTransform().translate(delta) 1777 return self.apply_transform(LT) 1778 1779 def shift(self, dx=0, dy=0, dz=0) -> Self: 1780 """Add a vector to the current object position.""" 1781 if utils.is_sequence(dx): 1782 dx, dy, dz = utils.make3d(dx) 1783 if dx == dy == dz == 0: 1784 return self 1785 LT = LinearTransform().translate([dx, dy, dz]) 1786 return self.apply_transform(LT) 1787 1788 def x(self, val=None) -> Self: 1789 """Set/Get object position along x axis.""" 1790 p = 
self.transform.position 1791 if val is None: 1792 return p[0] 1793 self.pos(val, p[1], p[2]) 1794 return self 1795 1796 def y(self, val=None)-> Self: 1797 """Set/Get object position along y axis.""" 1798 p = self.transform.position 1799 if val is None: 1800 return p[1] 1801 self.pos(p[0], val, p[2]) 1802 return self 1803 1804 def z(self, val=None) -> Self: 1805 """Set/Get object position along z axis.""" 1806 p = self.transform.position 1807 if val is None: 1808 return p[2] 1809 self.pos(p[0], p[1], val) 1810 return self 1811 1812 def rotate(self, angle: float, axis=(1, 0, 0), point=(0, 0, 0), rad=False) -> Self: 1813 """ 1814 Rotate around an arbitrary `axis` passing through `point`. 1815 1816 Example: 1817 ```python 1818 from vedo import * 1819 c1 = Cube() 1820 c2 = c1.clone().c('violet').alpha(0.5) # copy of c1 1821 v = vector(0.2,1,0) 1822 p = vector(1,0,0) # axis passes through this point 1823 c2.rotate(90, axis=v, point=p) 1824 l = Line(-v+p, v+p).lw(3).c('red') 1825 show(c1, l, c2, axes=1).close() 1826 ``` 1827 1828 ![](https://vedo.embl.es/images/feats/rotate_axis.png) 1829 """ 1830 LT = LinearTransform() 1831 LT.rotate(angle, axis, point, rad) 1832 return self.apply_transform(LT) 1833 1834 def rotate_x(self, angle: float, rad=False, around=None) -> Self: 1835 """ 1836 Rotate around x-axis. If angle is in radians set `rad=True`. 1837 1838 Use `around` to define a pivoting point. 1839 """ 1840 if angle == 0: 1841 return self 1842 LT = LinearTransform().rotate_x(angle, rad, around) 1843 return self.apply_transform(LT) 1844 1845 def rotate_y(self, angle: float, rad=False, around=None) -> Self: 1846 """ 1847 Rotate around y-axis. If angle is in radians set `rad=True`. 1848 1849 Use `around` to define a pivoting point. 
1850 """ 1851 if angle == 0: 1852 return self 1853 LT = LinearTransform().rotate_y(angle, rad, around) 1854 return self.apply_transform(LT) 1855 1856 def rotate_z(self, angle: float, rad=False, around=None) -> Self: 1857 """ 1858 Rotate around z-axis. If angle is in radians set `rad=True`. 1859 1860 Use `around` to define a pivoting point. 1861 """ 1862 if angle == 0: 1863 return self 1864 LT = LinearTransform().rotate_z(angle, rad, around) 1865 return self.apply_transform(LT) 1866 1867 def reorient(self, initaxis, newaxis, rotation=0, rad=False, xyplane=False) -> Self: 1868 """ 1869 Reorient the object to point to a new direction from an initial one. 1870 If `initaxis` is None, the object will be assumed in its "default" orientation. 1871 If `xyplane` is True, the object will be rotated to lie on the xy plane. 1872 1873 Use `rotation` to first rotate the object around its `initaxis`. 1874 """ 1875 q = self.transform.position 1876 LT = LinearTransform() 1877 LT.reorient(initaxis, newaxis, q, rotation, rad, xyplane) 1878 return self.apply_transform(LT) 1879 1880 def scale(self, s=None, reset=False, origin=True) -> Union[Self, np.array]: 1881 """ 1882 Set/get object's scaling factor. 1883 1884 Arguments: 1885 s : (list, float) 1886 scaling factor(s). 1887 reset : (bool) 1888 if True previous scaling factors are ignored. 1889 origin : (bool) 1890 if True scaling is applied with respect to object's position, 1891 otherwise is applied respect to (0,0,0). 1892 1893 Note: 1894 use `s=(sx,sy,sz)` to scale differently in the three coordinates. 
1895 """ 1896 if s is None: 1897 return np.array(self.transform.T.GetScale()) 1898 1899 if not utils.is_sequence(s): 1900 s = [s, s, s] 1901 1902 LT = LinearTransform() 1903 if reset: 1904 old_s = np.array(self.transform.T.GetScale()) 1905 LT.scale(s / old_s) 1906 else: 1907 if origin is True: 1908 LT.scale(s, origin=self.transform.position) 1909 elif origin is False: 1910 LT.scale(s, origin=False) 1911 else: 1912 LT.scale(s, origin=origin) 1913 1914 return self.apply_transform(LT) 1915 1916 1917############################################################################### 1918class VolumeAlgorithms(CommonAlgorithms): 1919 """Methods for Volume objects.""" 1920 1921 def bounds(self) -> np.ndarray: 1922 """ 1923 Get the object bounds. 1924 Returns a list in format `[xmin,xmax, ymin,ymax, zmin,zmax]`. 1925 """ 1926 # OVERRIDE CommonAlgorithms.bounds() which is too slow 1927 return np.array(self.dataset.GetBounds()) 1928 1929 def isosurface(self, value=None, flying_edges=False) -> "vedo.mesh.Mesh": 1930 """ 1931 Return an `Mesh` isosurface extracted from the `Volume` object. 1932 1933 Set `value` as single float or list of values to draw the isosurface(s). 1934 Use flying_edges for faster results (but sometimes can interfere with `smooth()`). 

        Examples:
            - [isosurfaces1.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/isosurfaces1.py)

                ![](https://vedo.embl.es/images/volumetric/isosurfaces.png)
        """
        scrange = self.dataset.GetScalarRange()

        if flying_edges:
            # multithreaded flying-edges variant of the contouring algorithm
            cf = vtki.new("FlyingEdges3D")
            cf.InterpolateAttributesOn()
        else:
            cf = vtki.new("ContourFilter")
            cf.UseScalarTreeOn()

        cf.SetInputData(self.dataset)
        cf.ComputeNormalsOn()

        if utils.is_sequence(value):
            # one contour per requested isovalue
            cf.SetNumberOfContours(len(value))
            for i, t in enumerate(value):
                cf.SetValue(i, t)
        else:
            if value is None:
                # automatic isovalue: a point inside the scalar range,
                # biased towards the lower end
                value = (2 * scrange[0] + scrange[1]) / 3.0
                # print("automatic isosurface value =", value)
            cf.SetValue(0, value)

        cf.Update()
        poly = cf.GetOutput()

        out = vedo.mesh.Mesh(poly, c=None).phong()
        # keep the original volume scalar range on the extracted surface
        out.mapper.SetScalarRange(scrange[0], scrange[1])

        out.pipeline = utils.OperationNode(
            "isosurface",
            parents=[self],
            comment=f"#pts {out.dataset.GetNumberOfPoints()}",
            c="#4cc9f0:#e9c46a",
        )
        return out

    def isosurface_discrete(self, value=None, nsmooth=15) -> "vedo.mesh.Mesh":
        """
        Create boundary/isocontour surfaces from a label map (e.g., a segmented image) using a threaded,
        3D version of the multiple objects/labels Surface Nets algorithm.
        The input is a 3D image (i.e., volume) where each voxel is labeled
        (integer labels are preferred to real values), and the output data is a polygonal mesh separating
        labeled regions / objects.
        (Note that on output each region [corresponding to a different segmented object] will share
        points/edges on a common boundary, i.e., two neighboring objects will share the boundary that separates them).

        Arguments:
            value : (float, list)
                single value or list of values to draw the isosurface(s).
            nsmooth : (int)
                number of iterations of smoothing (0 means no smoothing).

        Examples:
            - [isosurfaces2.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/isosurfaces2.py)
        """
        if not utils.is_sequence(value):
            value = [value]

        snets = vtki.new("SurfaceNets3D")
        snets.SetInputData(self.dataset)

        if nsmooth:
            # constrained smoothing of the extracted boundary surfaces
            snets.SmoothingOn()
            snets.AutomaticSmoothingConstraintsOn()
            snets.GetSmoother().SetNumberOfIterations(nsmooth)
            # snets.GetSmoother().SetRelaxationFactor(relaxation_factor)
            # snets.GetSmoother().SetConstraintDistance(constraint_distance)
        else:
            snets.SmoothingOff()

        for i, val in enumerate(value):
            snets.SetValue(i, val)
        snets.Update()
        # NOTE(review): Update() is invoked twice; the first call looks redundant
        # since the output mesh type/style are set right after it — confirm
        # against the vtkSurfaceNets3D documentation.
        snets.SetOutputMeshTypeToTriangles()
        snets.SetOutputStyleToBoundary()
        snets.Update()

        out = vedo.mesh.Mesh(snets.GetOutput())
        out.pipeline = utils.OperationNode(
            "isosurface_discrete",
            parents=[self],
            comment=f"#pts {out.dataset.GetNumberOfPoints()}",
            c="#4cc9f0:#e9c46a",
        )
        return out


    def legosurface(
        self,
        vmin=None,
        vmax=None,
        invert=False,
        boundary=False,
        array_name="input_scalars",
    ) -> "vedo.mesh.Mesh":
        """
        Represent an object - typically a `Volume` - as lego blocks (voxels).
        By default colors correspond to the volume's scalar.
        Returns an `Mesh` object.

        Arguments:
            vmin : (float)
                the lower threshold, voxels below this value are not shown.
            vmax : (float)
                the upper threshold, voxels above this value are not shown.
            invert : (bool)
                invert the selection of the voxels to be shown.
            boundary : (bool)
                controls whether to include cells that are partially inside
            array_name : (int, str)
                name or index of the scalar array to be considered

        Examples:
            - [legosurface.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/legosurface.py)

                ![](https://vedo.embl.es/images/volumetric/56820682-da40e500-684c-11e9-8ea3-91cbcba24b3a.png)
        """
        # use the dataset itself as an implicit function, windowed on the
        # requested scalar range
        imp_dataset = vtki.new("ImplicitDataSet")
        imp_dataset.SetDataSet(self.dataset)
        window = vtki.new("ImplicitWindowFunction")
        window.SetImplicitFunction(imp_dataset)

        srng = list(self.dataset.GetScalarRange())
        if vmin is not None:
            srng[0] = vmin
        if vmax is not None:
            srng[1] = vmax
        # widen the window slightly so values exactly at the limits are kept
        tol = 0.00001 * (srng[1] - srng[0])
        srng[0] -= tol
        srng[1] += tol
        window.SetWindowRange(srng)

        extract = vtki.new("ExtractGeometry")
        extract.SetInputData(self.dataset)
        extract.SetImplicitFunction(window)
        extract.SetExtractInside(invert)
        extract.SetExtractBoundaryCells(boundary)
        extract.Update()

        gf = vtki.new("GeometryFilter")
        gf.SetInputData(extract.GetOutput())
        gf.Update()

        m = vedo.mesh.Mesh(gf.GetOutput()).lw(0.1).flat()
        m.map_points_to_cells()
        m.celldata.select(array_name)

        m.pipeline = utils.OperationNode(
            "legosurface",
            parents=[self],
            comment=f"array: {array_name}",
            c="#4cc9f0:#e9c46a",
        )
        return m

    def tomesh(self, fill=True, shrink=1.0) -> "vedo.mesh.Mesh":
        """
        Build a polygonal Mesh from the current object.

        If `fill=True`, the interior faces of all the cells are created.
        (setting a `shrink` value slightly smaller than the default 1.0
        can avoid flickering due to internal adjacent faces).

        If `fill=False`, only the boundary faces will be generated.
        """
        gf = vtki.new("GeometryFilter")
        if fill:
            # shrink each cell so that internal faces become visible
            sf = vtki.new("ShrinkFilter")
            sf.SetInputData(self.dataset)
            sf.SetShrinkFactor(shrink)
            sf.Update()
            gf.SetInputData(sf.GetOutput())
            gf.Update()
            poly = gf.GetOutput()
            if shrink == 1.0:
                # no shrinking requested: merge the duplicated points back
                clean_poly = vtki.new("CleanPolyData")
                clean_poly.PointMergingOn()
                clean_poly.ConvertLinesToPointsOn()
                clean_poly.ConvertPolysToLinesOn()
                clean_poly.ConvertStripsToPolysOn()
                clean_poly.SetInputData(poly)
                clean_poly.Update()
                poly = clean_poly.GetOutput()
        else:
            gf.SetInputData(self.dataset)
            gf.Update()
            poly = gf.GetOutput()

        msh = vedo.mesh.Mesh(poly).flat()
        msh.scalarbar = self.scalarbar
        # carry over the color transfer function as a lookup table, if any
        lut = utils.ctf2lut(self)
        if lut:
            msh.mapper.SetLookupTable(lut)

        msh.pipeline = utils.OperationNode(
            "tomesh", parents=[self], comment=f"fill={fill}", c="#9e2a2b:#e9c46a"
        )
        return msh
class DataArrayHelper:
    # Internal use only.
    # Helper class to manage data associated to either
    # points (or vertices) and cells (or faces).
    def __init__(self, obj, association):
        # association: 0 = point data, 1 = cell data, 2 = field (meta) data
        self.obj = obj
        self.association = association

    def __getitem__(self, key):
        """Return the array (by name or index) as a numpy array,
        or as a list of strings for vtkStringArray entries."""
        if self.association == 0:
            data = self.obj.dataset.GetPointData()

        elif self.association == 1:
            data = self.obj.dataset.GetCellData()

        elif self.association == 2:
            data = self.obj.dataset.GetFieldData()

            varr = data.GetAbstractArray(key)
            if isinstance(varr, vtki.vtkStringArray):
                if isinstance(key, int):
                    key = data.GetArrayName(key)
                n = varr.GetNumberOfValues()
                narr = [varr.GetValue(i) for i in range(n)]
                return narr
                ###########

        else:
            raise RuntimeError()

        if isinstance(key, int):
            key = data.GetArrayName(key)

        arr = data.GetArray(key)
        if not arr:
            return None
        return utils.vtk2numpy(arr)

    def __setitem__(self, key, input_array):
        """Create or overwrite an array and make it the active one."""
        if self.association == 0:
            data = self.obj.dataset.GetPointData()
            n = self.obj.dataset.GetNumberOfPoints()
            self.obj.mapper.SetScalarModeToUsePointData()

        elif self.association == 1:
            data = self.obj.dataset.GetCellData()
            n = self.obj.dataset.GetNumberOfCells()
            self.obj.mapper.SetScalarModeToUseCellData()

        elif self.association == 2:
            data = self.obj.dataset.GetFieldData()
            if not utils.is_sequence(input_array):
                input_array = [input_array]

            if isinstance(input_array[0], str):
                # store heterogeneous content as a string array
                varr = vtki.vtkStringArray()
                varr.SetName(key)
                varr.SetNumberOfComponents(1)
                varr.SetNumberOfTuples(len(input_array))
                for i, iarr in enumerate(input_array):
                    if isinstance(iarr, np.ndarray):
                        iarr = iarr.tolist()  # better format
                        # Note: a string k can be converted to numpy with
                        # import json; k = np.array(json.loads(k))
                    varr.InsertValue(i, str(iarr))
            else:
                try:
                    varr = utils.numpy2vtk(input_array, name=key)
                except TypeError as e:
                    vedo.logger.error(
                        f"cannot create metadata with input object:\n"
                        f"{input_array}"
                        f"\n\nAllowed content examples are:\n"
                        f"- flat list of strings ['a','b', 1, [1,2,3], ...]"
                        f" (first item must be a string in this case)\n"
                        f" hint: use k = np.array(json.loads(k)) to convert strings\n"
                        f"- numpy arrays of any shape"
                    )
                    raise e

            data.AddArray(varr)
            return  ############

        else:
            raise RuntimeError()

        # point/cell data must match the number of elements
        if len(input_array) != n:
            vedo.logger.error(
                f"Error in point/cell data: length of input {len(input_array)}"
                f" != {n} nr. of elements"
            )
            raise RuntimeError()

        input_array = np.asarray(input_array)
        varr = utils.numpy2vtk(input_array, name=key)
        data.AddArray(varr)

        if len(input_array.shape) == 1:  # scalars
            data.SetActiveScalars(key)
            try:  # could be a volume mapper
                self.obj.mapper.SetScalarRange(data.GetScalars().GetRange())
            except AttributeError:
                pass
        elif len(input_array.shape) == 2 and input_array.shape[1] == 3:  # vectors
            if key.lower() == "normals":
                data.SetActiveNormals(key)
            else:
                data.SetActiveVectors(key)

    def keys(self) -> List[str]:
        """Return the list of available data array names"""
        if self.association == 0:
            data = self.obj.dataset.GetPointData()
        elif self.association == 1:
            data = self.obj.dataset.GetCellData()
        elif self.association == 2:
            data = self.obj.dataset.GetFieldData()
        arrnames = []
        for i in range(data.GetNumberOfArrays()):
            name = ""
            if self.association == 2:
                name = data.GetAbstractArray(i).GetName()
            else:
                iarr = data.GetArray(i)
                if iarr:
                    name = iarr.GetName()
            if name:
                arrnames.append(name)
        return arrnames

    def items(self) -> List:
        """Return the list of available data array `(names, values)`."""
        if self.association == 0:
            data = self.obj.dataset.GetPointData()
        elif self.association == 1:
            data = self.obj.dataset.GetCellData()
        elif self.association == 2:
            data = self.obj.dataset.GetFieldData()
        arrnames = []
        for i in range(data.GetNumberOfArrays()):
            if self.association == 2:
                name = data.GetAbstractArray(i).GetName()
            else:
                name = data.GetArray(i).GetName()
            if name:
                arrnames.append((name, self[name]))
        return arrnames

    def todict(self) -> dict:
        """Return a dictionary of the available data arrays."""
        return dict(self.items())

    def rename(self, oldname: str, newname: str) -> None:
        """Rename an array"""
        if self.association == 0:
            varr = self.obj.dataset.GetPointData().GetArray(oldname)
        elif self.association == 1:
            varr = self.obj.dataset.GetCellData().GetArray(oldname)
        elif self.association == 2:
            varr = self.obj.dataset.GetFieldData().GetAbstractArray(oldname)
        if varr:
            varr.SetName(newname)
        else:
            vedo.logger.warning(
                f"Cannot rename non existing array {oldname} to {newname}"
            )

    def remove(self, key: Union[int, str]) -> None:
        """Remove a data array by name or number"""
        if self.association == 0:
            self.obj.dataset.GetPointData().RemoveArray(key)
        elif self.association == 1:
            self.obj.dataset.GetCellData().RemoveArray(key)
        elif self.association == 2:
            self.obj.dataset.GetFieldData().RemoveArray(key)

    def clear(self) -> None:
        """Remove all data associated to this object"""
        if self.association == 0:
            data = self.obj.dataset.GetPointData()
        elif self.association == 1:
            data = self.obj.dataset.GetCellData()
        elif self.association == 2:
            data = self.obj.dataset.GetFieldData()
        # FIX: snapshot the names before removing. Removing while indexing
        # 0..n-1 shifts the remaining arrays down, which skips every other
        # array and can make GetArray(i) return None mid-loop.
        if self.association == 2:
            names = [data.GetAbstractArray(i).GetName() for i in range(data.GetNumberOfArrays())]
        else:
            names = [data.GetArray(i).GetName() for i in range(data.GetNumberOfArrays())]
        for name in names:
            data.RemoveArray(name)

    def select(self, key: Union[int, str]) -> Any:
        """Select one specific array by its name to make it the `active` one."""
        # Default (ColorModeToDefault): unsigned char scalars are treated as colors,
        # and NOT mapped through the lookup table, while everything else is.
        # ColorModeToDirectScalar extends ColorModeToDefault such that all integer
        # types are treated as colors with values in the range 0-255
        # and floating types are treated as colors with values in the range 0.0-1.0.
        # Setting ColorModeToMapScalars means that all scalar data will be mapped
        # through the lookup table.
        # (Note that for multi-component scalars, the particular component
        # to use for mapping can be specified using the SelectColorArray() method.)
        if self.association == 0:
            data = self.obj.dataset.GetPointData()
            self.obj.mapper.SetScalarModeToUsePointData()
        else:
            data = self.obj.dataset.GetCellData()
            self.obj.mapper.SetScalarModeToUseCellData()

        if isinstance(key, int):
            key = data.GetArrayName(key)

        arr = data.GetArray(key)
        if not arr:
            return self.obj

        nc = arr.GetNumberOfComponents()
        # print("GetNumberOfComponents", nc)
        if nc == 1:
            data.SetActiveScalars(key)
        elif nc == 2:
            # 2 components are interpreted as texture coordinates
            data.SetTCoords(arr)
        elif nc in (3, 4):
            if "rgb" in key.lower():  # type: ignore
                data.SetActiveScalars(key)
                try:
                    # could be a volume mapper
                    self.obj.mapper.SetColorModeToDirectScalars()
                    data.SetActiveVectors(None)  # need this to fix bug in #1066
                    # print("SetColorModeToDirectScalars for", key)
                except AttributeError:
                    pass
            else:
                data.SetActiveVectors(key)
        elif nc == 9:
            # 9 components are interpreted as a tensor
            data.SetActiveTensors(key)
        else:
            vedo.logger.error(f"Cannot select array {key} with {nc} components")
            return self.obj

        try:
            # could be a volume mapper
            self.obj.mapper.SetArrayName(key)
            self.obj.mapper.ScalarVisibilityOn()
        except AttributeError:
            pass

        return self.obj

    def select_texture_coords(self, key: Union[int,str]) -> Any:
        """Select one specific array to be used as texture coordinates."""
        if self.association == 0:
            data = self.obj.dataset.GetPointData()
        else:
            vedo.logger.warning("texture coordinates are only available for point data")
            return

        if isinstance(key, int):
            key = data.GetArrayName(key)
        data.SetTCoords(data.GetArray(key))
        return self.obj

    def select_normals(self, key: Union[int,str]) -> Any:
        """Select one specific normal array by its name to make it the "active" one."""
        if self.association == 0:
            data = self.obj.dataset.GetPointData()
            self.obj.mapper.SetScalarModeToUsePointData()
        else:
            data = self.obj.dataset.GetCellData()
            self.obj.mapper.SetScalarModeToUseCellData()

        if isinstance(key, int):
            key = data.GetArrayName(key)
        data.SetActiveNormals(key)
        return self.obj

    def print(self, **kwargs) -> None:
        """Print the array names available to terminal"""
        colors.printc(self.keys(), **kwargs)

    def __repr__(self) -> str:
        """Representation"""

        def _get_str(pd, header):
            # build a human-readable, ANSI-colored summary of a data object
            out = f"\x1b[2m\x1b[1m\x1b[7m{header}"
            if pd.GetNumberOfArrays():
                if self.obj.name:
                    out += f" in {self.obj.name}"
                out += f" contains {pd.GetNumberOfArrays()} array(s)\x1b[0m"
                for i in range(pd.GetNumberOfArrays()):
                    varr = pd.GetArray(i)
                    out += f"\n\x1b[1m\x1b[4mArray name : {varr.GetName()}\x1b[0m"
                    out += "\nindex".ljust(15) + f": {i}"
                    t = varr.GetDataType()
                    if t in vtki.array_types:
                        out += "\ntype".ljust(15)
                        out += f": {vtki.array_types[t]}"
                    shape = (varr.GetNumberOfTuples(), varr.GetNumberOfComponents())
                    out += "\nshape".ljust(15) + f": {shape}"
                    out += "\nrange".ljust(15) + f": {np.array(varr.GetRange())}"
                    out += "\nmax id".ljust(15) + f": {varr.GetMaxId()}"
                    out += "\nlook up table".ljust(15) + f": {bool(varr.GetLookupTable())}"
                    out += "\nin-memory size".ljust(15) + f": {varr.GetActualMemorySize()} KB"
            else:
                out += " is empty.\x1b[0m"
            return out

        # FIX: initialize so that an empty field-data section (or an invalid
        # association value) does not leave `out` unbound (NameError).
        out = ""
        if self.association == 0:
            out = _get_str(self.obj.dataset.GetPointData(), "Point Data")
        elif self.association == 1:
            out = _get_str(self.obj.dataset.GetCellData(), "Cell Data")
        elif self.association == 2:
            pd = self.obj.dataset.GetFieldData()
            if pd.GetNumberOfArrays():
                out = "\x1b[2m\x1b[1m\x1b[7mMeta Data"
                if self.obj.name:
                    out += f" in {self.obj.name}"
                out += f" contains {pd.GetNumberOfArrays()} entries\x1b[0m"
                for i in range(pd.GetNumberOfArrays()):
                    varr = pd.GetAbstractArray(i)
                    out += f"\n\x1b[1m\x1b[4mEntry name : {varr.GetName()}\x1b[0m"
                    out += "\nindex".ljust(15) + f": {i}"
                    shape = (varr.GetNumberOfTuples(), varr.GetNumberOfComponents())
                    out += "\nshape".ljust(15) + f": {shape}"

        return out
157 def keys(self) -> List[str]: 158 """Return the list of available data array names""" 159 if self.association == 0: 160 data = self.obj.dataset.GetPointData() 161 elif self.association == 1: 162 data = self.obj.dataset.GetCellData() 163 elif self.association == 2: 164 data = self.obj.dataset.GetFieldData() 165 arrnames = [] 166 for i in range(data.GetNumberOfArrays()): 167 name = "" 168 if self.association == 2: 169 name = data.GetAbstractArray(i).GetName() 170 else: 171 iarr = data.GetArray(i) 172 if iarr: 173 name = iarr.GetName() 174 if name: 175 arrnames.append(name) 176 return arrnames
Return the list of available data array names
178 def items(self) -> List: 179 """Return the list of available data array `(names, values)`.""" 180 if self.association == 0: 181 data = self.obj.dataset.GetPointData() 182 elif self.association == 1: 183 data = self.obj.dataset.GetCellData() 184 elif self.association == 2: 185 data = self.obj.dataset.GetFieldData() 186 arrnames = [] 187 for i in range(data.GetNumberOfArrays()): 188 if self.association == 2: 189 name = data.GetAbstractArray(i).GetName() 190 else: 191 name = data.GetArray(i).GetName() 192 if name: 193 arrnames.append((name, self[name])) 194 return arrnames
Return the list of available data array (names, values) pairs.
196 def todict(self) -> dict: 197 """Return a dictionary of the available data arrays.""" 198 return dict(self.items())
Return a dictionary of the available data arrays.
200 def rename(self, oldname: str, newname: str) -> None: 201 """Rename an array""" 202 if self.association == 0: 203 varr = self.obj.dataset.GetPointData().GetArray(oldname) 204 elif self.association == 1: 205 varr = self.obj.dataset.GetCellData().GetArray(oldname) 206 elif self.association == 2: 207 varr = self.obj.dataset.GetFieldData().GetAbstractArray(oldname) 208 if varr: 209 varr.SetName(newname) 210 else: 211 vedo.logger.warning( 212 f"Cannot rename non existing array {oldname} to {newname}" 213 )
Rename an array
215 def remove(self, key: Union[int, str]) -> None: 216 """Remove a data array by name or number""" 217 if self.association == 0: 218 self.obj.dataset.GetPointData().RemoveArray(key) 219 elif self.association == 1: 220 self.obj.dataset.GetCellData().RemoveArray(key) 221 elif self.association == 2: 222 self.obj.dataset.GetFieldData().RemoveArray(key)
Remove a data array by name or number
224 def clear(self) -> None: 225 """Remove all data associated to this object""" 226 if self.association == 0: 227 data = self.obj.dataset.GetPointData() 228 elif self.association == 1: 229 data = self.obj.dataset.GetCellData() 230 elif self.association == 2: 231 data = self.obj.dataset.GetFieldData() 232 for i in range(data.GetNumberOfArrays()): 233 if self.association == 2: 234 name = data.GetAbstractArray(i).GetName() 235 else: 236 name = data.GetArray(i).GetName() 237 data.RemoveArray(name)
Remove all data associated to this object
    def select(self, key: Union[int, str]) -> Any:
        """Select one specific array by its name to make it the `active` one."""
        # Default (ColorModeToDefault): unsigned char scalars are treated as colors,
        # and NOT mapped through the lookup table, while everything else is.
        # ColorModeToDirectScalar extends ColorModeToDefault such that all integer
        # types are treated as colors with values in the range 0-255
        # and floating types are treated as colors with values in the range 0.0-1.0.
        # Setting ColorModeToMapScalars means that all scalar data will be mapped
        # through the lookup table.
        # (Note that for multi-component scalars, the particular component
        # to use for mapping can be specified using the SelectColorArray() method.)
        if self.association == 0:
            data = self.obj.dataset.GetPointData()
            self.obj.mapper.SetScalarModeToUsePointData()
        else:
            data = self.obj.dataset.GetCellData()
            self.obj.mapper.SetScalarModeToUseCellData()

        if isinstance(key, int):
            key = data.GetArrayName(key)

        arr = data.GetArray(key)
        if not arr:
            # nothing to select, leave the object unchanged
            return self.obj

        nc = arr.GetNumberOfComponents()
        # print("GetNumberOfComponents", nc)
        if nc == 1:
            data.SetActiveScalars(key)
        elif nc == 2:
            # 2 components are interpreted as texture coordinates
            data.SetTCoords(arr)
        elif nc in (3, 4):
            if "rgb" in key.lower():  # type: ignore
                # color-like array: use the values directly, bypassing the LUT
                data.SetActiveScalars(key)
                try:
                    # could be a volume mapper
                    self.obj.mapper.SetColorModeToDirectScalars()
                    data.SetActiveVectors(None)  # need this to fix bug in #1066
                    # print("SetColorModeToDirectScalars for", key)
                except AttributeError:
                    pass
            else:
                data.SetActiveVectors(key)
        elif nc == 9:
            # 9 components are interpreted as a tensor
            data.SetActiveTensors(key)
        else:
            vedo.logger.error(f"Cannot select array {key} with {nc} components")
            return self.obj

        try:
            # could be a volume mapper
            self.obj.mapper.SetArrayName(key)
            self.obj.mapper.ScalarVisibilityOn()
        except AttributeError:
            pass

        return self.obj
Select one specific array by its name to make it the active one.
297 def select_texture_coords(self, key: Union[int,str]) -> Any: 298 """Select one specific array to be used as texture coordinates.""" 299 if self.association == 0: 300 data = self.obj.dataset.GetPointData() 301 else: 302 vedo.logger.warning("texture coordinates are only available for point data") 303 return 304 305 if isinstance(key, int): 306 key = data.GetArrayName(key) 307 data.SetTCoords(data.GetArray(key)) 308 return self.obj
Select one specific array to be used as texture coordinates.
310 def select_normals(self, key: Union[int,str]) -> Any: 311 """Select one specific normal array by its name to make it the "active" one.""" 312 if self.association == 0: 313 data = self.obj.dataset.GetPointData() 314 self.obj.mapper.SetScalarModeToUsePointData() 315 else: 316 data = self.obj.dataset.GetCellData() 317 self.obj.mapper.SetScalarModeToUseCellData() 318 319 if isinstance(key, int): 320 key = data.GetArrayName(key) 321 data.SetActiveNormals(key) 322 return self.obj
Select one specific normal array by its name to make it the "active" one.
377class CommonAlgorithms: 378 """Common algorithms.""" 379 380 @property 381 def pointdata(self): 382 """ 383 Create and/or return a `numpy.array` associated to points (vertices). 384 A data array can be indexed either as a string or by an integer number. 385 E.g.: `myobj.pointdata["arrayname"]` 386 387 Usage: 388 389 `myobj.pointdata.keys()` to return the available data array names 390 391 `myobj.pointdata.select(name)` to make this array the active one 392 393 `myobj.pointdata.remove(name)` to remove this array 394 """ 395 return DataArrayHelper(self, 0) 396 397 @property 398 def celldata(self): 399 """ 400 Create and/or return a `numpy.array` associated to cells (faces). 401 A data array can be indexed either as a string or by an integer number. 402 E.g.: `myobj.celldata["arrayname"]` 403 404 Usage: 405 406 `myobj.celldata.keys()` to return the available data array names 407 408 `myobj.celldata.select(name)` to make this array the active one 409 410 `myobj.celldata.remove(name)` to remove this array 411 """ 412 return DataArrayHelper(self, 1) 413 414 @property 415 def metadata(self): 416 """ 417 Create and/or return a `numpy.array` associated to neither cells nor faces. 418 A data array can be indexed either as a string or by an integer number. 419 E.g.: `myobj.metadata["arrayname"]` 420 421 Usage: 422 423 `myobj.metadata.keys()` to return the available data array names 424 425 `myobj.metadata.select(name)` to make this array the active one 426 427 `myobj.metadata.remove(name)` to remove this array 428 """ 429 return DataArrayHelper(self, 2) 430 431 def memory_address(self) -> int: 432 """ 433 Return a unique memory address integer which may serve as the ID of the 434 object, or passed to c++ code. 
435 """ 436 # https://www.linkedin.com/pulse/speedup-your-code-accessing-python-vtk-objects-from-c-pletzer/ 437 # https://github.com/tfmoraes/polydata_connectivity 438 return int(self.dataset.GetAddressAsString("")[5:], 16) 439 440 def memory_size(self) -> int: 441 """Return the size in bytes of the object in memory.""" 442 return self.dataset.GetActualMemorySize() 443 444 def modified(self) -> Self: 445 """Use in conjunction with `tonumpy()` to update any modifications to the image array.""" 446 self.dataset.GetPointData().Modified() 447 scals = self.dataset.GetPointData().GetScalars() 448 if scals: 449 scals.Modified() 450 return self 451 452 def box(self, scale=1, padding=0) -> "vedo.Mesh": 453 """ 454 Return the bounding box as a new `Mesh` object. 455 456 Arguments: 457 scale : (float) 458 box size can be scaled by a factor 459 padding : (float, list) 460 a constant padding can be added (can be a list `[padx,pady,padz]`) 461 """ 462 b = self.bounds() 463 if not utils.is_sequence(padding): 464 padding = [padding, padding, padding] 465 length, width, height = b[1] - b[0], b[3] - b[2], b[5] - b[4] 466 tol = (length + width + height) / 30000 # useful for boxing text 467 pos = [(b[0] + b[1]) / 2, (b[3] + b[2]) / 2, (b[5] + b[4]) / 2 - tol] 468 bx = vedo.shapes.Box( 469 pos, 470 length * scale + padding[0], 471 width * scale + padding[1], 472 height * scale + padding[2], 473 c="gray", 474 ) 475 try: 476 pr = vtki.vtkProperty() 477 pr.DeepCopy(self.properties) 478 bx.actor.SetProperty(pr) 479 bx.properties = pr 480 except (AttributeError, TypeError): 481 pass 482 bx.flat().lighting("off").wireframe(True) 483 return bx 484 485 def update_dataset(self, dataset, **kwargs) -> Self: 486 """Update the dataset of the object with the provided VTK dataset.""" 487 self._update(dataset, **kwargs) 488 return self 489 490 def bounds(self) -> np.ndarray: 491 """ 492 Get the object bounds. 493 Returns a list in format `[xmin,xmax, ymin,ymax, zmin,zmax]`. 
494 """ 495 try: # this is very slow for large meshes 496 pts = self.vertices 497 xmin, ymin, zmin = np.min(pts, axis=0) 498 xmax, ymax, zmax = np.max(pts, axis=0) 499 return np.array([xmin, xmax, ymin, ymax, zmin, zmax]) 500 except (AttributeError, ValueError): 501 return np.array(self.dataset.GetBounds()) 502 503 def xbounds(self, i=None) -> np.ndarray: 504 """Get the bounds `[xmin,xmax]`. Can specify upper or lower with i (0,1).""" 505 b = self.bounds() 506 if i is not None: 507 return b[i] 508 return np.array([b[0], b[1]]) 509 510 def ybounds(self, i=None) -> np.ndarray: 511 """Get the bounds `[ymin,ymax]`. Can specify upper or lower with i (0,1).""" 512 b = self.bounds() 513 if i == 0: 514 return b[2] 515 if i == 1: 516 return b[3] 517 return np.array([b[2], b[3]]) 518 519 def zbounds(self, i=None) -> np.ndarray: 520 """Get the bounds `[zmin,zmax]`. Can specify upper or lower with i (0,1).""" 521 b = self.bounds() 522 if i == 0: 523 return b[4] 524 if i == 1: 525 return b[5] 526 return np.array([b[4], b[5]]) 527 528 def diagonal_size(self) -> float: 529 """Get the length of the diagonal of the bounding box.""" 530 b = self.bounds() 531 return np.sqrt((b[1] - b[0])**2 + (b[3] - b[2])**2 + (b[5] - b[4])**2) 532 533 def average_size(self) -> float: 534 """ 535 Calculate and return the average size of the object. 536 This is the mean of the vertex distances from the center of mass. 
537 """ 538 coords = self.vertices 539 cm = np.mean(coords, axis=0) 540 if coords.shape[0] == 0: 541 return 0.0 542 cc = coords - cm 543 return np.mean(np.linalg.norm(cc, axis=1)) 544 545 def center_of_mass(self) -> np.ndarray: 546 """Get the center of mass of the object.""" 547 if isinstance(self, (vedo.RectilinearGrid, vedo.Volume)): 548 return np.array(self.dataset.GetCenter()) 549 cmf = vtki.new("CenterOfMass") 550 cmf.SetInputData(self.dataset) 551 cmf.Update() 552 c = cmf.GetCenter() 553 return np.array(c) 554 555 def copy_data_from(self, obj: Any) -> Self: 556 """Copy all data (point and cell data) from this input object""" 557 self.dataset.GetPointData().PassData(obj.dataset.GetPointData()) 558 self.dataset.GetCellData().PassData(obj.dataset.GetCellData()) 559 self.pipeline = utils.OperationNode( 560 "copy_data_from", 561 parents=[self, obj], 562 comment=f"{obj.__class__.__name__}", 563 shape="note", 564 c="#ccc5b9", 565 ) 566 return self 567 568 def inputdata(self): 569 """Obsolete, use `.dataset` instead.""" 570 colors.printc("WARNING: 'inputdata()' is obsolete, use '.dataset' instead.", c="y") 571 return self.dataset 572 573 @property 574 def npoints(self): 575 """Retrieve the number of points (or vertices).""" 576 return self.dataset.GetNumberOfPoints() 577 578 @property 579 def nvertices(self): 580 """Retrieve the number of vertices (or points).""" 581 return self.dataset.GetNumberOfPoints() 582 583 @property 584 def ncells(self): 585 """Retrieve the number of cells.""" 586 return self.dataset.GetNumberOfCells() 587 588 def points(self, pts=None): 589 """Obsolete, use `self.vertices` or `self.coordinates` instead.""" 590 if pts is None: ### getter 591 592 if warnings["points_getter"]: 593 colors.printc(warnings["points_getter"], c="y") 594 warnings["points_getter"] = "" 595 return self.vertices 596 597 else: ### setter 598 599 if warnings["points_setter"]: 600 colors.printc(warnings["points_setter"], c="y") 601 warnings["points_setter"] = "" 602 603 
pts = np.asarray(pts, dtype=np.float32) 604 605 if pts.ndim == 1: 606 ### getter by point index ################### 607 indices = pts.astype(int) 608 vpts = self.dataset.GetPoints() 609 arr = utils.vtk2numpy(vpts.GetData()) 610 return arr[indices] ########### 611 612 ### setter #################################### 613 if pts.shape[1] == 2: 614 pts = np.c_[pts, np.zeros(pts.shape[0], dtype=np.float32)] 615 arr = utils.numpy2vtk(pts, dtype=np.float32) 616 617 vpts = self.dataset.GetPoints() 618 vpts.SetData(arr) 619 vpts.Modified() 620 # reset mesh to identity matrix position/rotation: 621 self.point_locator = None 622 self.cell_locator = None 623 self.line_locator = None 624 self.transform = LinearTransform() 625 return self 626 627 @property 628 def cell_centers(self): 629 """ 630 Get the coordinates of the cell centers. 631 632 Examples: 633 - [delaunay2d.py](https://github.com/marcomusy/vedo/tree/master/examples/basic/delaunay2d.py) 634 635 See also: `CellCenters()`. 636 """ 637 vcen = vtki.new("CellCenters") 638 vcen.CopyArraysOff() 639 vcen.SetInputData(self.dataset) 640 vcen.Update() 641 return utils.vtk2numpy(vcen.GetOutput().GetPoints().GetData()) 642 643 @property 644 def lines(self): 645 """ 646 Get lines connectivity ids as a python array 647 formatted as `[[id0,id1], [id3,id4], ...]` 648 649 See also: `lines_as_flat_array()`. 650 """ 651 # Get cell connettivity ids as a 1D array. The vtk format is: 652 # [nids1, id0 ... idn, niids2, id0 ... idm, etc]. 653 try: 654 arr1d = utils.vtk2numpy(self.dataset.GetLines().GetData()) 655 except AttributeError: 656 return np.array([]) 657 i = 0 658 conn = [] 659 n = len(arr1d) 660 for _ in range(n): 661 cell = [arr1d[i + k + 1] for k in range(arr1d[i])] 662 conn.append(cell) 663 i += arr1d[i] + 1 664 if i >= n: 665 break 666 667 return conn # cannot always make a numpy array of it! 668 669 @property 670 def lines_as_flat_array(self): 671 """ 672 Get lines connectivity ids as a 1D numpy array. 673 Format is e.g. 
[2, 10,20, 3, 10,11,12, 2, 70,80, ...] 674 675 See also: `lines()`. 676 """ 677 try: 678 return utils.vtk2numpy(self.dataset.GetLines().GetData()) 679 except AttributeError: 680 return np.array([]) 681 682 def mark_boundaries(self) -> Self: 683 """ 684 Mark cells and vertices if they lie on a boundary. 685 A new array called `BoundaryCells` is added to the object. 686 """ 687 mb = vtki.new("MarkBoundaryFilter") 688 mb.SetInputData(self.dataset) 689 mb.Update() 690 self.dataset.DeepCopy(mb.GetOutput()) 691 self.pipeline = utils.OperationNode("mark_boundaries", parents=[self]) 692 return self 693 694 def find_cells_in_bounds(self, xbounds=(), ybounds=(), zbounds=()) -> np.ndarray: 695 """ 696 Find cells that are within the specified bounds. 697 """ 698 try: 699 xbounds = list(xbounds.bounds()) 700 except AttributeError: 701 pass 702 703 if len(xbounds) == 6: 704 bnds = xbounds 705 else: 706 bnds = list(self.bounds()) 707 if len(xbounds) == 2: 708 bnds[0] = xbounds[0] 709 bnds[1] = xbounds[1] 710 if len(ybounds) == 2: 711 bnds[2] = ybounds[0] 712 bnds[3] = ybounds[1] 713 if len(zbounds) == 2: 714 bnds[4] = zbounds[0] 715 bnds[5] = zbounds[1] 716 717 cell_ids = vtki.vtkIdList() 718 if not self.cell_locator: 719 self.cell_locator = vtki.new("CellTreeLocator") 720 self.cell_locator.SetDataSet(self.dataset) 721 self.cell_locator.BuildLocator() 722 self.cell_locator.FindCellsWithinBounds(bnds, cell_ids) 723 cids = [] 724 for i in range(cell_ids.GetNumberOfIds()): 725 cid = cell_ids.GetId(i) 726 cids.append(cid) 727 return np.array(cids) 728 729 def find_cells_along_line(self, p0, p1, tol=0.001) -> np.ndarray: 730 """ 731 Find cells that are intersected by a line segment. 
732 """ 733 cell_ids = vtki.vtkIdList() 734 if not self.cell_locator: 735 self.cell_locator = vtki.new("CellTreeLocator") 736 self.cell_locator.SetDataSet(self.dataset) 737 self.cell_locator.BuildLocator() 738 self.cell_locator.FindCellsAlongLine(p0, p1, tol, cell_ids) 739 cids = [] 740 for i in range(cell_ids.GetNumberOfIds()): 741 cid = cell_ids.GetId(i) 742 cids.append(cid) 743 return np.array(cids) 744 745 def find_cells_along_plane(self, origin, normal, tol=0.001) -> np.ndarray: 746 """ 747 Find cells that are intersected by a plane. 748 """ 749 cell_ids = vtki.vtkIdList() 750 if not self.cell_locator: 751 self.cell_locator = vtki.new("CellTreeLocator") 752 self.cell_locator.SetDataSet(self.dataset) 753 self.cell_locator.BuildLocator() 754 self.cell_locator.FindCellsAlongPlane(origin, normal, tol, cell_ids) 755 cids = [] 756 for i in range(cell_ids.GetNumberOfIds()): 757 cid = cell_ids.GetId(i) 758 cids.append(cid) 759 return np.array(cids) 760 761 def keep_cell_types(self, types=()): 762 """ 763 Extract cells of a specific type. 764 765 Check the VTK cell types here: 766 https://vtk.org/doc/nightly/html/vtkCellType_8h.html 767 """ 768 fe = vtki.new("ExtractCellsByType") 769 fe.SetInputData(self.dataset) 770 for t in types: 771 try: 772 if utils.is_integer(t): 773 it = t 774 else: 775 it = vtki.cell_types[t.upper()] 776 except KeyError: 777 vedo.logger.error(f"Cell type '{t}' not recognized") 778 continue 779 fe.AddCellType(it) 780 fe.Update() 781 self._update(fe.GetOutput()) 782 return self 783 784 def map_cells_to_points(self, arrays=(), move=False) -> Self: 785 """ 786 Interpolate cell data (i.e., data specified per cell or face) 787 into point data (i.e., data specified at each vertex). 788 The method of transformation is based on averaging the data values 789 of all cells using a particular point. 790 791 A custom list of arrays to be mapped can be passed in input. 792 793 Set `move=True` to delete the original `celldata` array. 
        """
        c2p = vtki.new("CellDataToPointData")
        c2p.SetInputData(self.dataset)
        if not move:
            # keep the original cell data alongside the new point data
            c2p.PassCellDataOn()
        if arrays:
            # process only the user-specified arrays
            c2p.ClearCellDataArrays()
            c2p.ProcessAllArraysOff()
            for arr in arrays:
                c2p.AddCellDataArray(arr)
        else:
            c2p.ProcessAllArraysOn()
        c2p.Update()
        self._update(c2p.GetOutput(), reset_locators=False)
        self.mapper.SetScalarModeToUsePointData()
        self.pipeline = utils.OperationNode("map_cells_to_points", parents=[self])
        return self

    @property
    def vertices(self):
        """Return the vertices (points) coordinates."""
        # Different VTK dataset types expose their points differently,
        # hence the cascade of fallbacks below.
        try:
            # for polydata and unstructured grid
            varr = self.dataset.GetPoints().GetData()
        except (AttributeError, TypeError):
            try:
                # for RectilinearGrid, StructuredGrid
                vpts = vtki.vtkPoints()
                self.dataset.GetPoints(vpts)
                varr = vpts.GetData()
            except (AttributeError, TypeError):
                try:
                    # for ImageData
                    v2p = vtki.new("ImageToPoints")
                    v2p.SetInputData(self.dataset)
                    v2p.Update()
                    varr = v2p.GetOutput().GetPoints().GetData()
                except AttributeError:
                    # the dataset exposes no points at all
                    return np.array([])

        return utils.vtk2numpy(varr)

    # setter
    @vertices.setter
    def vertices(self, pts):
        """Set vertices (points) coordinates."""
        pts = utils.make3d(pts)
        arr = utils.numpy2vtk(pts, dtype=np.float32)
        try:
            vpts = self.dataset.GetPoints()
            vpts.SetData(arr)
            vpts.Modified()
        except (AttributeError, TypeError):
            vedo.logger.error(f"Cannot set vertices for {type(self)}")
            return self
        # reset mesh to identity matrix position/rotation:
        # cached locators are invalidated by the new coordinates
        self.point_locator = None
        self.cell_locator = None
        self.line_locator = None
        self.transform = LinearTransform()

    @property
    def coordinates(self):
        """Return the vertices (points) coordinates. Same as `vertices`."""
        return self.vertices

    @coordinates.setter
    def coordinates(self, pts):
        """Set vertices (points) coordinates.
        Same as `vertices`."""
        self.vertices = pts

    @property
    def cells_as_flat_array(self):
        """
        Get cell connectivity ids as a 1D numpy array.
        Format is e.g. [3, 10,20,30  4, 10,11,12,13 ...]
        """
        try:
            # valid for unstructured grid
            arr1d = utils.vtk2numpy(self.dataset.GetCells().GetData())
        except AttributeError:
            # valid for polydata
            arr1d = utils.vtk2numpy(self.dataset.GetPolys().GetData())
        return arr1d

    @property
    def cells(self):
        """
        Get the cells connectivity ids as a numpy array.

        The output format is: `[[id0 ... idn], [id0 ... idm], etc]`.
        """
        try:
            # valid for unstructured grid
            arr1d = utils.vtk2numpy(self.dataset.GetCells().GetData())
        except AttributeError:
            try:
                # valid for polydata
                arr1d = utils.vtk2numpy(self.dataset.GetPolys().GetData())
            except AttributeError:
                vedo.logger.warning(f"Cannot get cells for {type(self)}")
                return np.array([])

        # Get cell connectivity ids as a 1D array. vtk format is:
        # [nids1, id0 ... idn, nids2, id0 ... idm, etc].
        i = 0
        conn = []
        n = len(arr1d)
        if n:
            while True:
                # arr1d[i] holds the number of ids composing this cell
                cell = [arr1d[i + k] for k in range(1, arr1d[i] + 1)]
                conn.append(cell)
                i += arr1d[i] + 1
                if i >= n:
                    break
        return conn

    def map_points_to_cells(self, arrays=(), move=False) -> Self:
        """
        Interpolate point data (i.e., data specified per point or vertex)
        into cell data (i.e., data specified per cell).
        The method of transformation is based on averaging the data values
        of all points defining a particular cell.

        A custom list of arrays to be mapped can be passed in input.

        Set `move=True` to delete the original `pointdata` array.

        Examples:
            - [mesh_map2cell.py](https://github.com/marcomusy/vedo/tree/master/examples/basic/mesh_map2cell.py)
        """
        p2c = vtki.new("PointDataToCellData")
        p2c.SetInputData(self.dataset)
        if not move:
            # keep the original point data alongside the new cell data
            p2c.PassPointDataOn()
        if arrays:
            # process only the user-specified arrays
            p2c.ClearPointDataArrays()
            p2c.ProcessAllArraysOff()
            for arr in arrays:
                p2c.AddPointDataArray(arr)
        else:
            p2c.ProcessAllArraysOn()
        p2c.Update()
        self._update(p2c.GetOutput(), reset_locators=False)
        self.mapper.SetScalarModeToUseCellData()
        self.pipeline = utils.OperationNode("map_points_to_cells", parents=[self])
        return self

    def resample_data_from(self, source, tol=None, categorical=False) -> Self:
        """
        Resample point and cell data from another dataset.
        The output has the same structure but its point data have
        the resampled values from target.

        Use `tol` to set the tolerance used to compute whether
        a point in the source is in a cell of the current object.
        Points without resampled values, and their cells, are marked as blank.
        If the data is categorical, then the resulting data will be determined
        by a nearest neighbor interpolation scheme.

        Example:
        ```python
        from vedo import *
        m1 = Mesh(dataurl+'bunny.obj')#.add_gaussian_noise(0.1)
        pts = m1.vertices
        ces = m1.cell_centers
        m1.pointdata["xvalues"] = np.power(pts[:,0], 3)
        m1.celldata["yvalues"] = np.power(ces[:,1], 3)
        m2 = Mesh(dataurl+'bunny.obj')
        m2.resample_data_from(m1)
        # print(m2.pointdata["xvalues"])
        show(m1, m2 , N=2, axes=1)
        ```
        """
        rs = vtki.new("ResampleWithDataSet")
        rs.SetInputData(self.dataset)
        rs.SetSourceData(source.dataset)

        rs.SetPassPointArrays(True)
        rs.SetPassCellArrays(True)
        rs.SetPassFieldArrays(True)
        rs.SetCategoricalData(categorical)

        # let VTK compute the tolerance unless an explicit one is given
        rs.SetComputeTolerance(True)
        if tol:
            rs.SetComputeTolerance(False)
            rs.SetTolerance(tol)
        rs.Update()
        self._update(rs.GetOutput(), reset_locators=False)
        self.pipeline = utils.OperationNode(
            "resample_data_from",
            comment=f"{source.__class__.__name__}",
            parents=[self, source],
        )
        return self

    def interpolate_data_from(
        self,
        source,
        radius=None,
        n=None,
        kernel="shepard",
        exclude=("Normals",),
        on="points",
        null_strategy=1,
        null_value=0,
    ) -> Self:
        """
        Interpolate over source to port its data onto the current object using various kernels.

        If n (number of closest points to use) is set then radius value is ignored.

        Check out also:
            `probe()` which in many cases can be faster.

        Arguments:
            kernel : (str)
                available kernels are [shepard, gaussian, linear]
            null_strategy : (int)
                specify a strategy to use when encountering a "null" point
                during the interpolation process. Null points occur when the local neighborhood
                (of nearby points to interpolate from) is empty.

                - Case 0: an output array is created that marks points
                  as being valid (=1) or null (invalid =0), and the null_value is set as well
                - Case 1: the output data value(s) are set to the provided null_value
                - Case 2: simply use the closest point to perform the interpolation.
            null_value : (float)
                see above.

        Examples:
            - [interpolate_scalar1.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/interpolate_scalar1.py)
            - [interpolate_scalar3.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/interpolate_scalar3.py)
            - [interpolate_scalar4.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/interpolate_scalar4.py)
            - [image_probe.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/image_probe.py)

        ![](https://vedo.embl.es/images/advanced/interpolateMeshArray.png)
        """
        if radius is None and not n:
            vedo.logger.error("in interpolate_data_from(): please set either radius or n")
            raise RuntimeError

        if on == "points":
            points = source.dataset
        elif on == "cells":
            # convert the source cell data to point data first
            c2p = vtki.new("CellDataToPointData")
            c2p.SetInputData(source.dataset)
            c2p.Update()
            points = c2p.GetOutput()
        else:
            vedo.logger.error("in interpolate_data_from(), on must be on points or cells")
            raise RuntimeError()

        locator = vtki.new("PointLocator")
        locator.SetDataSet(points)
        locator.BuildLocator()

        if kernel.lower() == "shepard":
            kern = vtki.new("ShepardKernel")
            kern.SetPowerParameter(2)
        elif kernel.lower() == "gaussian":
            kern = vtki.new("GaussianKernel")
            kern.SetSharpness(2)
        elif kernel.lower() == "linear":
            kern = vtki.new("LinearKernel")
        else:
            vedo.logger.error("available kernels are: [shepard, gaussian, linear]")
            raise RuntimeError()

        if n:
            # n closest points takes precedence over radius
            kern.SetNumberOfPoints(n)
            kern.SetKernelFootprintToNClosest()
        else:
            kern.SetRadius(radius)
            kern.SetKernelFootprintToRadius()

        interpolator = vtki.new("PointInterpolator")
        interpolator.SetInputData(self.dataset)
        interpolator.SetSourceData(points)
        interpolator.SetKernel(kern)
        interpolator.SetLocator(locator)
        interpolator.PassFieldArraysOn()
        interpolator.SetNullPointsStrategy(null_strategy)
        interpolator.SetNullValue(null_value)
        interpolator.SetValidPointsMaskArrayName("ValidPointMask")
        for ex in exclude:
            interpolator.AddExcludedArray(ex)
        interpolator.Update()

        if on == "cells":
            # map the interpolated point data back onto the cells
            p2c = vtki.new("PointDataToCellData")
            p2c.SetInputData(interpolator.GetOutput())
            p2c.Update()
            cpoly = p2c.GetOutput()
        else:
            cpoly = interpolator.GetOutput()

        self._update(cpoly, reset_locators=False)

        self.pipeline = utils.OperationNode("interpolate_data_from", parents=[self, source])
        return self

    def add_ids(self) -> Self:
        """
        Generate point and cell ids arrays.

        Two new arrays are added to the mesh: `PointID` and `CellID`.
        """
        ids = vtki.new("IdFilter")
        ids.SetInputData(self.dataset)
        ids.PointIdsOn()
        ids.CellIdsOn()
        ids.FieldDataOff()
        ids.SetPointIdsArrayName("PointID")
        ids.SetCellIdsArrayName("CellID")
        ids.Update()
        self._update(ids.GetOutput(), reset_locators=False)
        self.pipeline = utils.OperationNode("add_ids", parents=[self])
        return self

    def gradient(self, input_array=None, on="points", fast=False) -> np.ndarray:
        """
        Compute and return the gradient of the active scalar field as a numpy array.

        Arguments:
            input_array : (str)
                array of the scalars to compute the gradient,
                if None the current active array is selected
            on : (str)
                compute either on 'points' or 'cells' data
            fast : (bool)
                if True, will use a less accurate algorithm
                that performs fewer derivative calculations (and is therefore faster).

        Examples:
            - [isolines.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/isolines.py)

        ![](https://user-images.githubusercontent.com/32848391/72433087-f00a8780-3798-11ea-9778-991f0abeca70.png)
        """
        gra = vtki.new("GradientFilter")
        if on.startswith("p"):
            varr = self.dataset.GetPointData()
            tp = vtki.vtkDataObject.FIELD_ASSOCIATION_POINTS
        elif on.startswith("c"):
            varr = self.dataset.GetCellData()
            tp = vtki.vtkDataObject.FIELD_ASSOCIATION_CELLS
        else:
            vedo.logger.error(f"in gradient: unknown option {on}")
            raise RuntimeError

        if input_array is None:
            # default to the currently active scalar array
            if varr.GetScalars():
                input_array = varr.GetScalars().GetName()
            else:
                vedo.logger.error(f"in gradient: no scalars found for {on}")
                raise RuntimeError

        gra.SetInputData(self.dataset)
        gra.SetInputScalars(tp, input_array)
        gra.SetResultArrayName("Gradient")
        gra.SetFasterApproximation(fast)
        gra.ComputeDivergenceOff()
        gra.ComputeVorticityOff()
        gra.ComputeGradientOn()
        gra.Update()
        if on.startswith("p"):
            gvecs = utils.vtk2numpy(gra.GetOutput().GetPointData().GetArray("Gradient"))
        else:
            gvecs = utils.vtk2numpy(gra.GetOutput().GetCellData().GetArray("Gradient"))
        return gvecs

    def divergence(self, array_name=None, on="points", fast=False) -> np.ndarray:
        """
        Compute and return the divergence of a vector field as a numpy array.

        Arguments:
            array_name : (str)
                name of the array of vectors to compute the divergence,
                if None the current active array is selected
            on : (str)
                compute either on 'points' or 'cells' data
            fast : (bool)
                if True, will use a less accurate algorithm
                that performs fewer derivative calculations (and is therefore faster).
        """
        div = vtki.new("GradientFilter")
        if on.startswith("p"):
            varr = self.dataset.GetPointData()
            tp = vtki.vtkDataObject.FIELD_ASSOCIATION_POINTS
        elif on.startswith("c"):
            varr = self.dataset.GetCellData()
            tp = vtki.vtkDataObject.FIELD_ASSOCIATION_CELLS
        else:
            vedo.logger.error(f"in divergence(): unknown option {on}")
            raise RuntimeError

        if array_name is None:
            # default to the currently active vector array
            if varr.GetVectors():
                array_name = varr.GetVectors().GetName()
            else:
                vedo.logger.error(f"in divergence(): no vectors found for {on}")
                raise RuntimeError

        div.SetInputData(self.dataset)
        div.SetInputScalars(tp, array_name)
        div.ComputeDivergenceOn()
        div.ComputeGradientOff()
        div.ComputeVorticityOff()
        div.SetDivergenceArrayName("Divergence")
        div.SetFasterApproximation(fast)
        div.Update()
        if on.startswith("p"):
            dvecs = utils.vtk2numpy(div.GetOutput().GetPointData().GetArray("Divergence"))
        else:
            dvecs = utils.vtk2numpy(div.GetOutput().GetCellData().GetArray("Divergence"))
        return dvecs

    def vorticity(self, array_name=None, on="points", fast=False) -> np.ndarray:
        """
        Compute and return the vorticity of a vector field as a numpy array.

        Arguments:
            array_name : (str)
                name of the array to compute the vorticity,
                if None the current active array is selected
            on : (str)
                compute either on 'points' or 'cells' data
            fast : (bool)
                if True, will use a less accurate algorithm
                that performs fewer derivative calculations (and is therefore faster).
        """
        vort = vtki.new("GradientFilter")
        if on.startswith("p"):
            varr = self.dataset.GetPointData()
            tp = vtki.vtkDataObject.FIELD_ASSOCIATION_POINTS
        elif on.startswith("c"):
            varr = self.dataset.GetCellData()
            tp = vtki.vtkDataObject.FIELD_ASSOCIATION_CELLS
        else:
            vedo.logger.error(f"in vorticity(): unknown option {on}")
            raise RuntimeError

        if array_name is None:
            # default to the currently active vector array
            if varr.GetVectors():
                array_name = varr.GetVectors().GetName()
            else:
                vedo.logger.error(f"in vorticity(): no vectors found for {on}")
                raise RuntimeError

        vort.SetInputData(self.dataset)
        vort.SetInputScalars(tp, array_name)
        vort.ComputeDivergenceOff()
        vort.ComputeGradientOff()
        vort.ComputeVorticityOn()
        vort.SetVorticityArrayName("Vorticity")
        vort.SetFasterApproximation(fast)
        vort.Update()
        if on.startswith("p"):
            vvecs = utils.vtk2numpy(vort.GetOutput().GetPointData().GetArray("Vorticity"))
        else:
            vvecs = utils.vtk2numpy(vort.GetOutput().GetCellData().GetArray("Vorticity"))
        return vvecs

    def probe(self, source) -> Self:
        """
        Takes a data set and probes its scalars at the specified points in space.

        Note that a mask is also output with valid/invalid points which can be accessed
        with `mesh.pointdata['ValidPointMask']`.

        Check out also:
            `interpolate_data_from()`

        Examples:
            - [probe_points.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/probe_points.py)

        ![](https://vedo.embl.es/images/volumetric/probePoints.png)
        """
        probe_filter = vtki.new("ProbeFilter")
        probe_filter.SetSourceData(source.dataset)
        probe_filter.SetInputData(self.dataset)
        probe_filter.Update()
        self._update(probe_filter.GetOutput(), reset_locators=False)
        self.pipeline = utils.OperationNode("probe", parents=[self, source])
        # expose the VTK validity mask under a friendlier name
        self.pointdata.rename("vtkValidPointMask", "ValidPointMask")
        return self

    def compute_cell_size(self) -> Self:
        """
        Add to this object a cell data array
        containing the area, volume and edge length
        of the cells (when applicable to the object type).

        Array names are: `Area`, `Volume`, `Length`.
        """
        csf = vtki.new("CellSizeFilter")
        csf.SetInputData(self.dataset)
        csf.SetComputeArea(1)
        csf.SetComputeVolume(1)
        csf.SetComputeLength(1)
        csf.SetComputeVertexCount(0)
        csf.SetAreaArrayName("Area")
        csf.SetVolumeArrayName("Volume")
        csf.SetLengthArrayName("Length")
        csf.Update()
        self._update(csf.GetOutput(), reset_locators=False)
        return self

    def generate_random_data(self) -> Self:
        """Fill a dataset with random attributes"""
        gen = vtki.new("RandomAttributeGenerator")
        gen.SetInputData(self.dataset)
        gen.GenerateAllDataOn()
        gen.SetDataTypeToFloat()
        gen.GeneratePointNormalsOff()
        gen.GeneratePointTensorsOn()
        gen.GenerateCellScalarsOn()
        gen.Update()
        self._update(gen.GetOutput(), reset_locators=False)
        self.pipeline = utils.OperationNode("generate_random_data", parents=[self])
        return self

    def integrate_data(self) -> dict:
        """
        Integrate point and cell data arrays while computing length,
        area or volume of the domain.
        It works for 1D, 2D or 3D cells.

        For volumetric datasets, this filter ignores all but 3D cells.
        It will not compute the volume contained in a closed surface.

        Returns a dictionary with keys: `pointdata`, `celldata`, `metadata`,
        which contain the integration result for the corresponding attributes.

        Examples:
            ```python
            from vedo import *
            surf = Sphere(res=100)
            surf.pointdata['scalars'] = np.ones(surf.npoints)
            data = surf.integrate_data()
            print(data['pointdata']['scalars'], "is equal to 4pi", 4*np.pi)
            ```

            ```python
            from vedo import *

            xcoords1 = np.arange(0, 2.2, 0.2)
            xcoords2 = sqrt(np.arange(0, 4.2, 0.2))

            ycoords = np.arange(0, 1.2, 0.2)

            surf1 = Grid(s=(xcoords1, ycoords)).rotate_y(-45).lw(2)
            surf2 = Grid(s=(xcoords2, ycoords)).rotate_y(-45).lw(2)

            surf1.pointdata['scalars'] = surf1.vertices[:,2]
            surf2.pointdata['scalars'] = surf2.vertices[:,2]

            data1 = surf1.integrate_data()
            data2 = surf2.integrate_data()

            print(data1['pointdata']['scalars'],
                "is equal to",
                data2['pointdata']['scalars'],
                "even if the grids are different!",
                "(= the volume under the surface)"
            )
            show(surf1, surf2, N=2, axes=1).close()
            ```
        """
        vinteg = vtki.new("IntegrateAttributes")
        vinteg.SetInputData(self.dataset)
        vinteg.Update()
        ugrid = vedo.UnstructuredGrid(vinteg.GetOutput())
        # collect the integrated arrays into plain dictionaries
        data = dict(
            pointdata=ugrid.pointdata.todict(),
            celldata=ugrid.celldata.todict(),
            metadata=ugrid.metadata.todict(),
        )
        return data

    def write(self, filename, binary=True) -> None:
        """Write object to file."""
        out = vedo.file_io.write(self, filename, binary)
        out.pipeline = utils.OperationNode(
            "write", parents=[self], comment=filename[:15], shape="folder", c="#8a817c"
        )

    def tomesh(self, bounds=(), shrink=0) -> "vedo.Mesh":
        """
        Extract boundary geometry from dataset (or convert data to polygonal type).

        Two new arrays are added to the mesh: `OriginalCellIds` and `OriginalPointIds`
        to keep track of the original mesh elements.

        Arguments:
            bounds : (list)
                specify a sub-region to extract
            shrink : (float)
                shrink the cells to a fraction of their original size
        """
        geo = vtki.new("GeometryFilter")

        if shrink:
            # shrink the cells before extracting the boundary geometry
            sf = vtki.new("ShrinkFilter")
            sf.SetInputData(self.dataset)
            sf.SetShrinkFactor(shrink)
            sf.Update()
            geo.SetInputData(sf.GetOutput())
        else:
            geo.SetInputData(self.dataset)

        geo.SetPassThroughCellIds(1)
        geo.SetPassThroughPointIds(1)
        geo.SetOriginalCellIdsName("OriginalCellIds")
        geo.SetOriginalPointIdsName("OriginalPointIds")
        geo.SetNonlinearSubdivisionLevel(1)
        # geo.MergingOff() # crashes on StructuredGrids
        if bounds:
            geo.SetExtent(bounds)
            geo.ExtentClippingOn()
        geo.Update()
        msh = vedo.mesh.Mesh(geo.GetOutput())
        msh.pipeline = utils.OperationNode("tomesh", parents=[self], c="#9e2a2b")
        return msh

    def signed_distance(self, dims=(20, 20, 20), bounds=None, invert=False, max_radius=None) -> "vedo.Volume":
        """
        Compute the `Volume` object whose voxels contains the signed distance from
        the object. The calling object must have "Normals" defined.

        Arguments:
            bounds : (list, actor)
                bounding box sizes
            dims : (list)
                dimensions (nr. of voxels) of the output volume.
            invert : (bool)
                flip the sign
            max_radius : (float)
                specify how far out to propagate distance calculation

        Examples:
            - [distance2mesh.py](https://github.com/marcomusy/vedo/blob/master/examples/basic/distance2mesh.py)

        ![](https://vedo.embl.es/images/basic/distance2mesh.png)
        """
        if bounds is None:
            bounds = self.bounds()
        if max_radius is None:
            # default propagation radius: half the object size
            max_radius = self.diagonal_size() / 2
        dist = vtki.new("SignedDistance")
        dist.SetInputData(self.dataset)
        dist.SetRadius(max_radius)
        dist.SetBounds(bounds)
        dist.SetDimensions(dims)
        dist.Update()
        img = dist.GetOutput()
        if invert:
            # flip the sign by multiplying the image by -1
            mat = vtki.new("ImageMathematics")
            mat.SetInput1Data(img)
            mat.SetOperationToMultiplyByK()
            mat.SetConstantK(-1)
            mat.Update()
            img = mat.GetOutput()

        vol = vedo.Volume(img)
        vol.name = "SignedDistanceVolume"
        vol.pipeline = utils.OperationNode(
            "signed_distance",
            parents=[self],
            comment=f"dims={tuple(vol.dimensions())}",
            c="#e9c46a:#0096c7",
        )
        return vol

    def unsigned_distance(
        self, dims=(25,25,25), bounds=(), max_radius=0, cap_value=0) -> "vedo.Volume":
        """
        Compute the `Volume` object whose voxels contains the unsigned distance.
1471 """ 1472 dist = vtki.new("UnsignedDistance") 1473 dist.SetInputData(self.dataset) 1474 dist.SetDimensions(dims) 1475 1476 if len(bounds) == 6: 1477 dist.SetBounds(bounds) 1478 # elif bounds == "auto": 1479 # dist.AdjustBoundsOn() 1480 else: 1481 dist.SetBounds(self.bounds()) 1482 if not max_radius: 1483 max_radius = self.diagonal_size() / 10 1484 dist.SetRadius(max_radius) 1485 1486 if self.point_locator: 1487 dist.SetLocator(self.point_locator) 1488 1489 if cap_value is not None: 1490 dist.CappingOn() 1491 dist.SetCapValue(cap_value) 1492 dist.SetOutputScalarTypeToFloat() 1493 dist.Update() 1494 vol = vedo.Volume(dist.GetOutput()) 1495 vol.name = "UnsignedDistanceVolume" 1496 vol.pipeline = utils.OperationNode( 1497 "unsigned_distance", parents=[self], c="#e9c46a:#0096c7") 1498 return vol 1499 1500 def smooth_data(self, 1501 niter=10, relaxation_factor=0.1, strategy=0, mask=None, 1502 exclude=("Normals", "TextureCoordinates"), 1503 ) -> Self: 1504 """ 1505 Smooth point attribute data using distance weighted Laplacian kernel. 1506 1507 The effect is to blur regions of high variation and emphasize low variation regions. 
1508 1509 Arguments: 1510 niter : (int) 1511 number of iterations 1512 relaxation_factor : (float) 1513 relaxation factor controlling the amount of Laplacian smoothing applied 1514 strategy : (int) 1515 strategy to use for Laplacian smoothing 1516 - 0: use all points, all point data attributes are smoothed 1517 - 1: smooth all point attribute data except those on the boundary 1518 - 2: only point data connected to a boundary point are smoothed 1519 mask : (str, np.ndarray) 1520 array to be used as a mask (ignore then the strategy keyword) 1521 exclude : (list) 1522 list of arrays to be excluded from smoothing 1523 """ 1524 try: 1525 saf = vtki.new("AttributeSmoothingFilter") 1526 except: 1527 vedo.logger.error("smooth_data() only avaialble in VTK>=9.3.0") 1528 return self 1529 saf.SetInputData(self.dataset) 1530 saf.SetRelaxationFactor(relaxation_factor) 1531 saf.SetNumberOfIterations(niter) 1532 1533 for ex in exclude: 1534 saf.AddExcludedArray(ex) 1535 1536 saf.SetWeightsTypeToDistance2() 1537 1538 saf.SetSmoothingStrategy(strategy) 1539 if mask is not None: 1540 saf.SetSmoothingStrategyToSmoothingMask() 1541 if isinstance(mask, str): 1542 mask_ = self.dataset.GetPointData().GetArray(mask) 1543 if not mask_: 1544 vedo.logger.error(f"smooth_data(): mask array {mask} not found") 1545 return self 1546 mask_array = vtki.vtkUnsignedCharArray() 1547 mask_array.ShallowCopy(mask_) 1548 mask_array.SetName(mask_.GetName()) 1549 else: 1550 mask_array = utils.numpy2vtk(mask, dtype=np.uint8) 1551 saf.SetSmoothingMask(mask_array) 1552 1553 saf.Update() 1554 1555 self._update(saf.GetOutput()) 1556 self.pipeline = utils.OperationNode( 1557 "smooth_data", comment=f"strategy {strategy}", parents=[self], c="#9e2a2b" 1558 ) 1559 return self 1560 1561 def compute_streamlines( 1562 self, 1563 seeds: Any, 1564 integrator="rk4", 1565 direction="forward", 1566 initial_step_size=None, 1567 max_propagation=None, 1568 max_steps=10000, 1569 step_length=0, 1570 surface_constrained=False, 1571 
            compute_vorticity=False,
    ) -> Union["vedo.Lines", None]:
        """
        Integrate a vector field to generate streamlines.

        Arguments:
            seeds : (Mesh, Points, list)
                starting points of the streamlines
            integrator : (str)
                type of integration method to be used:
                    - "rk2" (Runge-Kutta 2)
                    - "rk4" (Runge-Kutta 4)
                    - "rk45" (Runge-Kutta 45)
            direction : (str)
                direction of integration, either "forward", "backward" or "both"
            initial_step_size : (float)
                initial step size used for line integration
            max_propagation : (float)
                maximum length of a streamline expressed in absolute units
            max_steps : (int)
                maximum number of steps for a streamline
            step_length : (float)
                maximum length of a step expressed in absolute units
            surface_constrained : (bool)
                whether to stop integrating when the streamline leaves the surface
            compute_vorticity : (bool)
                whether to compute the vorticity at each streamline point
        """
        b = self.dataset.GetBounds()
        # average extent of the bounding box, used to derive sensible defaults
        size = (b[5]-b[4] + b[3]-b[2] + b[1]-b[0]) / 3
        if initial_step_size is None:
            initial_step_size = size / 1000.0

        if max_propagation is None:
            max_propagation = size * 2

        if utils.is_sequence(seeds):
            # allow a raw list of points as seeds
            seeds = vedo.Points(seeds)

        sti = vtki.new("StreamTracer")
        sti.SetSourceData(seeds.dataset)
        if isinstance(self, vedo.RectilinearGrid):
            # the tracer needs an unstructured grid in this case
            sti.SetInputData(vedo.UnstructuredGrid(self.dataset).dataset)
        else:
            sti.SetInputDataObject(self.dataset)

        sti.SetInitialIntegrationStep(initial_step_size)
        sti.SetComputeVorticity(compute_vorticity)
        sti.SetMaximumNumberOfSteps(max_steps)
        sti.SetMaximumPropagation(max_propagation)
        sti.SetSurfaceStreamlines(surface_constrained)
        if step_length:
            sti.SetMaximumIntegrationStep(step_length)

        if "for" in direction:
            sti.SetIntegrationDirectionToForward()
        elif "back" in direction:
            sti.SetIntegrationDirectionToBackward()
        elif "both" in direction:
            sti.SetIntegrationDirectionToBoth()
        else:
            vedo.logger.error(f"in compute_streamlines(), unknown direction {direction}")
            return None

        if integrator == "rk2":
            sti.SetIntegratorTypeToRungeKutta2()
        elif integrator == "rk4":
            sti.SetIntegratorTypeToRungeKutta4()
        elif integrator == "rk45":
            sti.SetIntegratorTypeToRungeKutta45()
        else:
            vedo.logger.error(f"in compute_streamlines(), unknown integrator {integrator}")
            return None

        sti.Update()

        stlines = vedo.shapes.Lines(sti.GetOutput(), lw=4)
        stlines.name = "StreamLines"
        self.pipeline = utils.OperationNode(
            "compute_streamlines", comment=f"{integrator}", parents=[self, seeds], c="#9e2a2b"
        )
        return stlines
Common algorithms.
380 @property 381 def pointdata(self): 382 """ 383 Create and/or return a `numpy.array` associated to points (vertices). 384 A data array can be indexed either as a string or by an integer number. 385 E.g.: `myobj.pointdata["arrayname"]` 386 387 Usage: 388 389 `myobj.pointdata.keys()` to return the available data array names 390 391 `myobj.pointdata.select(name)` to make this array the active one 392 393 `myobj.pointdata.remove(name)` to remove this array 394 """ 395 return DataArrayHelper(self, 0)
Create and/or return a `numpy.array` associated to points (vertices).
A data array can be indexed either as a string or by an integer number,
e.g. `myobj.pointdata["arrayname"]`.

Usage:
- `myobj.pointdata.keys()` returns the available data array names
- `myobj.pointdata.select(name)` makes this array the active one
- `myobj.pointdata.remove(name)` removes this array
397 @property 398 def celldata(self): 399 """ 400 Create and/or return a `numpy.array` associated to cells (faces). 401 A data array can be indexed either as a string or by an integer number. 402 E.g.: `myobj.celldata["arrayname"]` 403 404 Usage: 405 406 `myobj.celldata.keys()` to return the available data array names 407 408 `myobj.celldata.select(name)` to make this array the active one 409 410 `myobj.celldata.remove(name)` to remove this array 411 """ 412 return DataArrayHelper(self, 1)
Create and/or return a `numpy.array` associated to cells (faces).
A data array can be indexed either as a string or by an integer number,
e.g. `myobj.celldata["arrayname"]`.

Usage:
- `myobj.celldata.keys()` returns the available data array names
- `myobj.celldata.select(name)` makes this array the active one
- `myobj.celldata.remove(name)` removes this array
414 @property 415 def metadata(self): 416 """ 417 Create and/or return a `numpy.array` associated to neither cells nor faces. 418 A data array can be indexed either as a string or by an integer number. 419 E.g.: `myobj.metadata["arrayname"]` 420 421 Usage: 422 423 `myobj.metadata.keys()` to return the available data array names 424 425 `myobj.metadata.select(name)` to make this array the active one 426 427 `myobj.metadata.remove(name)` to remove this array 428 """ 429 return DataArrayHelper(self, 2)
Create and/or return a `numpy.array` associated to neither points nor cells
(i.e. field data attached to the dataset itself).
A data array can be indexed either as a string or by an integer number,
e.g. `myobj.metadata["arrayname"]`.

Usage:
- `myobj.metadata.keys()` returns the available data array names
- `myobj.metadata.select(name)` makes this array the active one
- `myobj.metadata.remove(name)` removes this array
431 def memory_address(self) -> int: 432 """ 433 Return a unique memory address integer which may serve as the ID of the 434 object, or passed to c++ code. 435 """ 436 # https://www.linkedin.com/pulse/speedup-your-code-accessing-python-vtk-objects-from-c-pletzer/ 437 # https://github.com/tfmoraes/polydata_connectivity 438 return int(self.dataset.GetAddressAsString("")[5:], 16)
Return a unique memory address integer which may serve as the ID of the object, or be passed to C++ code.
440 def memory_size(self) -> int: 441 """Return the size in bytes of the object in memory.""" 442 return self.dataset.GetActualMemorySize()
Return the size in bytes of the object in memory.
444 def modified(self) -> Self: 445 """Use in conjunction with `tonumpy()` to update any modifications to the image array.""" 446 self.dataset.GetPointData().Modified() 447 scals = self.dataset.GetPointData().GetScalars() 448 if scals: 449 scals.Modified() 450 return self
Use in conjunction with tonumpy() to update any modifications to the image array.
def box(self, scale=1, padding=0) -> "vedo.Mesh":
    """
    Return the bounding box as a new `Mesh` object.

    Arguments:
        scale : (float)
            box size can be scaled by a factor
        padding : (float, list)
            a constant padding can be added (can be a list `[padx,pady,padz]`)
    """
    b = self.bounds()
    if not utils.is_sequence(padding):
        padding = [padding] * 3
    dx, dy, dz = b[1] - b[0], b[3] - b[2], b[5] - b[4]
    tol = (dx + dy + dz) / 30000  # useful for boxing text
    center = [(b[0] + b[1]) / 2, (b[3] + b[2]) / 2, (b[5] + b[4]) / 2 - tol]
    bx = vedo.shapes.Box(
        center,
        dx * scale + padding[0],
        dy * scale + padding[1],
        dz * scale + padding[2],
        c="gray",
    )
    # try to inherit the visual properties of the source object
    try:
        prop = vtki.vtkProperty()
        prop.DeepCopy(self.properties)
        bx.actor.SetProperty(prop)
        bx.properties = prop
    except (AttributeError, TypeError):
        pass
    bx.flat().lighting("off").wireframe(True)
    return bx
Return the bounding box as a new Mesh
object.
Arguments:
- scale : (float) box size can be scaled by a factor
- padding : (float, list)
a constant padding can be added (can be a list
[padx,pady,padz]
)
485 def update_dataset(self, dataset, **kwargs) -> Self: 486 """Update the dataset of the object with the provided VTK dataset.""" 487 self._update(dataset, **kwargs) 488 return self
Update the dataset of the object with the provided VTK dataset.
def bounds(self) -> np.ndarray:
    """
    Get the object bounds.
    Returns a list in format `[xmin,xmax, ymin,ymax, zmin,zmax]`.
    """
    try:  # this is very slow for large meshes
        pts = self.vertices
        lo = np.min(pts, axis=0)
        hi = np.max(pts, axis=0)
        return np.array([lo[0], hi[0], lo[1], hi[1], lo[2], hi[2]])
    except (AttributeError, ValueError):
        # fall back to the (cached) VTK bounds
        return np.array(self.dataset.GetBounds())
Get the object bounds.
Returns a list in format [xmin,xmax, ymin,ymax, zmin,zmax].
def xbounds(self, i=None) -> np.ndarray:
    """Get the bounds `[xmin,xmax]`. Can specify upper or lower with i (0,1)."""
    b = self.bounds()
    # i indexes directly into the bounds array: 0 -> xmin, 1 -> xmax
    return b[i] if i is not None else np.array([b[0], b[1]])
Get the bounds [xmin,xmax]. Can specify upper or lower with i (0,1).
def ybounds(self, i=None) -> np.ndarray:
    """Get the bounds `[ymin,ymax]`. Can specify upper or lower with i (0,1)."""
    b = self.bounds()
    # y extent lives at slots 2 and 3 of the bounds array
    if i in (0, 1):
        return b[2 + i]
    return np.array([b[2], b[3]])
Get the bounds [ymin,ymax]. Can specify upper or lower with i (0,1).
def zbounds(self, i=None) -> np.ndarray:
    """Get the bounds `[zmin,zmax]`. Can specify upper or lower with i (0,1)."""
    b = self.bounds()
    # z extent lives at slots 4 and 5 of the bounds array
    if i in (0, 1):
        return b[4 + i]
    return np.array([b[4], b[5]])
Get the bounds [zmin,zmax]. Can specify upper or lower with i (0,1).
def diagonal_size(self) -> float:
    """Get the length of the diagonal of the bounding box."""
    b = self.bounds()
    extents = np.array([b[1] - b[0], b[3] - b[2], b[5] - b[4]])
    return np.sqrt(np.sum(extents * extents))
Get the length of the diagonal of the bounding box.
def average_size(self) -> float:
    """
    Calculate and return the average size of the object.
    This is the mean of the vertex distances from the center of mass.
    """
    coords = self.vertices
    # Guard the empty case BEFORE computing the mean: np.mean on an empty
    # array emits a RuntimeWarning and yields NaN (the original computed
    # the center of mass first and only then checked for emptiness).
    if coords.shape[0] == 0:
        return 0.0
    cm = np.mean(coords, axis=0)
    return np.mean(np.linalg.norm(coords - cm, axis=1))
Calculate and return the average size of the object. This is the mean of the vertex distances from the center of mass.
def center_of_mass(self) -> np.ndarray:
    """Get the center of mass of the object."""
    # grids and volumes: use the geometric center of the dataset
    if isinstance(self, (vedo.RectilinearGrid, vedo.Volume)):
        return np.array(self.dataset.GetCenter())
    cmf = vtki.new("CenterOfMass")
    cmf.SetInputData(self.dataset)
    cmf.Update()
    return np.array(cmf.GetCenter())
Get the center of mass of the object.
def copy_data_from(self, obj: Any) -> Self:
    """Copy all data (point and cell data) from this input object"""
    target, source = self.dataset, obj.dataset
    target.GetPointData().PassData(source.GetPointData())
    target.GetCellData().PassData(source.GetCellData())
    self.pipeline = utils.OperationNode(
        "copy_data_from",
        parents=[self, obj],
        comment=f"{obj.__class__.__name__}",
        shape="note",
        c="#ccc5b9",
    )
    return self
Copy all data (point and cell data) from this input object
def inputdata(self):
    """Obsolete, use `.dataset` instead."""
    colors.printc("WARNING: 'inputdata()' is obsolete, use '.dataset' instead.", c="y")
    # still return the dataset so legacy callers keep working
    return self.dataset
Obsolete, use .dataset
instead.
@property
def npoints(self):
    """Retrieve the number of points (or vertices)."""
    return self.dataset.GetNumberOfPoints()
Retrieve the number of points (or vertices).
@property
def nvertices(self):
    """Retrieve the number of vertices (or points)."""
    # alias of npoints: both count the dataset points
    return self.dataset.GetNumberOfPoints()
Retrieve the number of vertices (or points).
@property
def ncells(self):
    """Retrieve the number of cells."""
    return self.dataset.GetNumberOfCells()
Retrieve the number of cells.
def points(self, pts=None):
    """Obsolete, use `self.vertices` or `self.coordinates` instead."""
    if pts is None:  ### getter
        # warn once, then silence the deprecation message
        if warnings["points_getter"]:
            colors.printc(warnings["points_getter"], c="y")
            warnings["points_getter"] = ""
        return self.vertices

    ### setter
    if warnings["points_setter"]:
        colors.printc(warnings["points_setter"], c="y")
        warnings["points_setter"] = ""

    pts = np.asarray(pts, dtype=np.float32)

    if pts.ndim == 1:
        ### legacy getter-by-index: points([i, j, ...]) returns those vertices
        indices = pts.astype(int)
        vpts = self.dataset.GetPoints()
        arr = utils.vtk2numpy(vpts.GetData())
        return arr[indices]

    ### setter of full coordinates
    if pts.shape[1] == 2:
        # pad 2D input with a zero z-column
        pts = np.c_[pts, np.zeros(pts.shape[0], dtype=np.float32)]
    arr = utils.numpy2vtk(pts, dtype=np.float32)

    vpts = self.dataset.GetPoints()
    vpts.SetData(arr)
    vpts.Modified()
    # reset mesh to identity matrix position/rotation and drop stale locators
    self.point_locator = None
    self.cell_locator = None
    self.line_locator = None
    self.transform = LinearTransform()
    return self
Obsolete, use self.vertices
or self.coordinates
instead.
@property
def cell_centers(self):
    """
    Get the coordinates of the cell centers.

    Examples:
        - [delaunay2d.py](https://github.com/marcomusy/vedo/tree/master/examples/basic/delaunay2d.py)

    See also: `CellCenters()`.
    """
    ccf = vtki.new("CellCenters")
    ccf.CopyArraysOff()
    ccf.SetInputData(self.dataset)
    ccf.Update()
    return utils.vtk2numpy(ccf.GetOutput().GetPoints().GetData())
@property
def lines(self):
    """
    Get lines connectivity ids as a python array
    formatted as `[[id0,id1], [id3,id4], ...]`

    See also: `lines_as_flat_array()`.
    """
    # vtk stores connectivity flat as:
    # [nids1, id0 ... idn, nids2, id0 ... idm, etc].
    try:
        flat = utils.vtk2numpy(self.dataset.GetLines().GetData())
    except AttributeError:
        return np.array([])
    conn = []
    i, n = 0, len(flat)
    while i < n:
        count = flat[i]
        conn.append([flat[i + 1 + k] for k in range(count)])
        i += count + 1
    return conn  # cannot always make a numpy array of it!
Get lines connectivity ids as a python array
formatted as [[id0,id1], [id3,id4], ...]
See also: lines_as_flat_array()
.
@property
def lines_as_flat_array(self):
    """
    Get lines connectivity ids as a 1D numpy array.
    Format is e.g. [2, 10,20, 3, 10,11,12, 2, 70,80, ...]

    See also: `lines()`.
    """
    try:
        # datasets without GetLines() fall through to the empty array
        return utils.vtk2numpy(self.dataset.GetLines().GetData())
    except AttributeError:
        return np.array([])
Get lines connectivity ids as a 1D numpy array. Format is e.g. [2, 10,20, 3, 10,11,12, 2, 70,80, ...]
See also: lines()
.
def mark_boundaries(self) -> Self:
    """
    Mark cells and vertices if they lie on a boundary.
    A new array called `BoundaryCells` is added to the object.
    """
    mbf = vtki.new("MarkBoundaryFilter")
    mbf.SetInputData(self.dataset)
    mbf.Update()
    # overwrite the dataset in place with the marked copy
    self.dataset.DeepCopy(mbf.GetOutput())
    self.pipeline = utils.OperationNode("mark_boundaries", parents=[self])
    return self
Mark cells and vertices if they lie on a boundary.
A new array called BoundaryCells
is added to the object.
def find_cells_in_bounds(self, xbounds=(), ybounds=(), zbounds=()) -> np.ndarray:
    """
    Find cells that are within the specified bounds.
    """
    # xbounds may also be a vedo object: use its bounding box then
    try:
        xbounds = list(xbounds.bounds())
    except AttributeError:
        pass

    if len(xbounds) == 6:
        bnds = xbounds
    else:
        # start from the object's own bounds and override per axis
        bnds = list(self.bounds())
        if len(xbounds) == 2:
            bnds[0], bnds[1] = xbounds
        if len(ybounds) == 2:
            bnds[2], bnds[3] = ybounds
        if len(zbounds) == 2:
            bnds[4], bnds[5] = zbounds

    cell_ids = vtki.vtkIdList()
    # lazily build and cache the cell locator
    if not self.cell_locator:
        self.cell_locator = vtki.new("CellTreeLocator")
        self.cell_locator.SetDataSet(self.dataset)
        self.cell_locator.BuildLocator()
    self.cell_locator.FindCellsWithinBounds(bnds, cell_ids)
    return np.array([cell_ids.GetId(i) for i in range(cell_ids.GetNumberOfIds())])
Find cells that are within the specified bounds.
def find_cells_along_line(self, p0, p1, tol=0.001) -> np.ndarray:
    """
    Find cells that are intersected by a line segment.
    """
    cell_ids = vtki.vtkIdList()
    # lazily build and cache the cell locator
    if not self.cell_locator:
        self.cell_locator = vtki.new("CellTreeLocator")
        self.cell_locator.SetDataSet(self.dataset)
        self.cell_locator.BuildLocator()
    self.cell_locator.FindCellsAlongLine(p0, p1, tol, cell_ids)
    return np.array([cell_ids.GetId(i) for i in range(cell_ids.GetNumberOfIds())])
Find cells that are intersected by a line segment.
def find_cells_along_plane(self, origin, normal, tol=0.001) -> np.ndarray:
    """
    Find cells that are intersected by a plane.
    """
    cell_ids = vtki.vtkIdList()
    # lazily build and cache the cell locator
    if not self.cell_locator:
        self.cell_locator = vtki.new("CellTreeLocator")
        self.cell_locator.SetDataSet(self.dataset)
        self.cell_locator.BuildLocator()
    self.cell_locator.FindCellsAlongPlane(origin, normal, tol, cell_ids)
    return np.array([cell_ids.GetId(i) for i in range(cell_ids.GetNumberOfIds())])
Find cells that are intersected by a plane.
def keep_cell_types(self, types=()) -> Self:
    """
    Extract cells of a specific type.

    Check the VTK cell types here:
    https://vtk.org/doc/nightly/html/vtkCellType_8h.html
    """
    # Added `-> Self` for consistency with the other fluent methods,
    # and catch AttributeError so a non-string, non-integer entry is
    # reported instead of crashing on `t.upper()`.
    fe = vtki.new("ExtractCellsByType")
    fe.SetInputData(self.dataset)
    for t in types:
        try:
            if utils.is_integer(t):
                it = t
            else:
                it = vtki.cell_types[t.upper()]
        except (KeyError, AttributeError):
            vedo.logger.error(f"Cell type '{t}' not recognized")
            continue
        fe.AddCellType(it)
    fe.Update()
    self._update(fe.GetOutput())
    return self
Extract cells of a specific type.
Check the VTK cell types here: https://vtk.org/doc/nightly/html/vtkCellType_8h.html
def map_cells_to_points(self, arrays=(), move=False) -> Self:
    """
    Interpolate cell data (i.e., data specified per cell or face)
    into point data (i.e., data specified at each vertex).
    The method of transformation is based on averaging the data values
    of all cells using a particular point.

    A custom list of arrays to be mapped can be passed in input.

    Set `move=True` to delete the original `celldata` array.
    """
    c2p = vtki.new("CellDataToPointData")
    c2p.SetInputData(self.dataset)
    if not move:
        c2p.PassCellDataOn()
    if arrays:
        # only map the explicitly requested arrays
        c2p.ClearCellDataArrays()
        c2p.ProcessAllArraysOff()
        for name in arrays:
            c2p.AddCellDataArray(name)
    else:
        c2p.ProcessAllArraysOn()
    c2p.Update()
    self._update(c2p.GetOutput(), reset_locators=False)
    self.mapper.SetScalarModeToUsePointData()
    self.pipeline = utils.OperationNode("map_cells_to_points", parents=[self])
    return self
Interpolate cell data (i.e., data specified per cell or face) into point data (i.e., data specified at each vertex). The method of transformation is based on averaging the data values of all cells using a particular point.
A custom list of arrays to be mapped can be passed in input.
Set move=True
to delete the original celldata
array.
@property
def vertices(self):
    """Return the vertices (points) coordinates."""
    # Try the dataset flavors in order of likelihood; each fallback
    # handles a dataset type that lacks the previous accessor.
    try:
        # polydata and unstructured grid
        point_array = self.dataset.GetPoints().GetData()
    except (AttributeError, TypeError):
        try:
            # RectilinearGrid, StructuredGrid (fill an external vtkPoints)
            vpts = vtki.vtkPoints()
            self.dataset.GetPoints(vpts)
            point_array = vpts.GetData()
        except (AttributeError, TypeError):
            try:
                # ImageData: convert voxels to an explicit point set
                v2p = vtki.new("ImageToPoints")
                v2p.SetInputData(self.dataset)
                v2p.Update()
                point_array = v2p.GetOutput().GetPoints().GetData()
            except AttributeError:
                return np.array([])
    return utils.vtk2numpy(point_array)
Return the vertices (points) coordinates.
@property
def coordinates(self):
    """Return the vertices (points) coordinates. Same as `vertices`."""
    # plain alias: delegate to the vertices property
    return self.vertices
Return the vertices (points) coordinates. Same as vertices
.
@property
def cells_as_flat_array(self):
    """
    Get cell connectivity ids as a 1D numpy array.
    Format is e.g. [3, 10,20,30 4, 10,11,12,13 ...]
    """
    try:
        # unstructured grids expose GetCells()
        return utils.vtk2numpy(self.dataset.GetCells().GetData())
    except AttributeError:
        # polydata exposes GetPolys()
        return utils.vtk2numpy(self.dataset.GetPolys().GetData())
Get cell connectivity ids as a 1D numpy array. Format is e.g. [3, 10,20,30 4, 10,11,12,13 ...]
@property
def cells(self):
    """
    Get the cells connectivity ids as a numpy array.

    The output format is: `[[id0 ... idn], [id0 ... idm], etc]`.
    """
    try:
        # valid for unstructured grid
        flat = utils.vtk2numpy(self.dataset.GetCells().GetData())
    except AttributeError:
        try:
            # valid for polydata
            flat = utils.vtk2numpy(self.dataset.GetPolys().GetData())
        except AttributeError:
            vedo.logger.warning(f"Cannot get cells for {type(self)}")
            return np.array([])

    # flat vtk connectivity format is:
    # [nids1, id0 ... idn, nids2, id0 ... idm, etc].
    conn = []
    i, n = 0, len(flat)
    while i < n:
        count = flat[i]
        conn.append([flat[i + k] for k in range(1, count + 1)])
        i += count + 1
    return conn
Get the cells connectivity ids as a numpy array.
The output format is: [[id0 ... idn], [id0 ... idm], etc]
.
def map_points_to_cells(self, arrays=(), move=False) -> Self:
    """
    Interpolate point data (i.e., data specified per point or vertex)
    into cell data (i.e., data specified per cell).
    The method of transformation is based on averaging the data values
    of all points defining a particular cell.

    A custom list of arrays to be mapped can be passed in input.

    Set `move=True` to delete the original `pointdata` array.

    Examples:
        - [mesh_map2cell.py](https://github.com/marcomusy/vedo/tree/master/examples/basic/mesh_map2cell.py)
    """
    p2c = vtki.new("PointDataToCellData")
    p2c.SetInputData(self.dataset)
    if not move:
        p2c.PassPointDataOn()
    if arrays:
        # only map the explicitly requested arrays
        p2c.ClearPointDataArrays()
        p2c.ProcessAllArraysOff()
        for name in arrays:
            p2c.AddPointDataArray(name)
    else:
        p2c.ProcessAllArraysOn()
    p2c.Update()
    self._update(p2c.GetOutput(), reset_locators=False)
    self.mapper.SetScalarModeToUseCellData()
    self.pipeline = utils.OperationNode("map_points_to_cells", parents=[self])
    return self
Interpolate point data (i.e., data specified per point or vertex) into cell data (i.e., data specified per cell). The method of transformation is based on averaging the data values of all points defining a particular cell.
A custom list of arrays to be mapped can be passed in input.
Set move=True
to delete the original pointdata
array.
Examples:
def resample_data_from(self, source, tol=None, categorical=False) -> Self:
    """
    Resample point and cell data from another dataset.
    The output has the same structure but its point data have
    the resampled values from target.

    Use `tol` to set the tolerance used to compute whether
    a point in the source is in a cell of the current object.
    Points without resampled values, and their cells, are marked as blank.
    If the data is categorical, then the resulting data will be determined
    by a nearest neighbor interpolation scheme.

    Example:
    ```python
    from vedo import *
    m1 = Mesh(dataurl+'bunny.obj')#.add_gaussian_noise(0.1)
    pts = m1.vertices
    ces = m1.cell_centers
    m1.pointdata["xvalues"] = np.power(pts[:,0], 3)
    m1.celldata["yvalues"] = np.power(ces[:,1], 3)
    m2 = Mesh(dataurl+'bunny.obj')
    m2.resample_data_from(m1)
    # print(m2.pointdata["xvalues"])
    show(m1, m2 , N=2, axes=1)
    ```
    """
    rs = vtki.new("ResampleWithDataSet")
    rs.SetInputData(self.dataset)
    rs.SetSourceData(source.dataset)

    # pass through all attribute kinds
    rs.SetPassPointArrays(True)
    rs.SetPassCellArrays(True)
    rs.SetPassFieldArrays(True)
    rs.SetCategoricalData(categorical)

    if tol:
        # explicit user tolerance overrides the automatic one
        rs.SetComputeTolerance(False)
        rs.SetTolerance(tol)
    else:
        rs.SetComputeTolerance(True)
    rs.Update()
    self._update(rs.GetOutput(), reset_locators=False)
    self.pipeline = utils.OperationNode(
        "resample_data_from",
        comment=f"{source.__class__.__name__}",
        parents=[self, source],
    )
    return self
Resample point and cell data from another dataset. The output has the same structure but its point data have the resampled values from target.
Use tol
to set the tolerance used to compute whether
a point in the source is in a cell of the current object.
Points without resampled values, and their cells, are marked as blank.
If the data is categorical, then the resulting data will be determined
by a nearest neighbor interpolation scheme.
Example:
from vedo import *
m1 = Mesh(dataurl+'bunny.obj')#.add_gaussian_noise(0.1)
pts = m1.vertices
ces = m1.cell_centers
m1.pointdata["xvalues"] = np.power(pts[:,0], 3)
m1.celldata["yvalues"] = np.power(ces[:,1], 3)
m2 = Mesh(dataurl+'bunny.obj')
m2.resample_data_from(m1)
# print(m2.pointdata["xvalues"])
show(m1, m2 , N=2, axes=1)
def interpolate_data_from(
    self,
    source,
    radius=None,
    n=None,
    kernel="shepard",
    exclude=("Normals",),
    on="points",
    null_strategy=1,
    null_value=0,
) -> Self:
    """
    Interpolate over source to port its data onto the current object using various kernels.

    If n (number of closest points to use) is set then radius value is ignored.

    Check out also:
        `probe()` which in many cases can be faster.

    Arguments:
        kernel : (str)
            available kernels are [shepard, gaussian, linear]
        null_strategy : (int)
            specify a strategy to use when encountering a "null" point
            during the interpolation process. Null points occur when the local neighborhood
            (of nearby points to interpolate from) is empty.

            - Case 0: an output array is created that marks points
              as being valid (=1) or null (invalid =0), and the null_value is set as well
            - Case 1: the output data value(s) are set to the provided null_value
            - Case 2: simply use the closest point to perform the interpolation.
        null_value : (float)
            see above.

    Examples:
        - [interpolate_scalar1.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/interpolate_scalar1.py)
        - [interpolate_scalar3.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/interpolate_scalar3.py)
        - [interpolate_scalar4.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/interpolate_scalar4.py)
        - [image_probe.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/image_probe.py)

        ![](https://vedo.embl.es/images/advanced/interpolateMeshArray.png)
    """
    if radius is None and not n:
        vedo.logger.error("in interpolate_data_from(): please set either radius or n")
        raise RuntimeError

    # choose the source point set; cell data must first be moved to points
    if on == "points":
        points = source.dataset
    elif on == "cells":
        c2p = vtki.new("CellDataToPointData")
        c2p.SetInputData(source.dataset)
        c2p.Update()
        points = c2p.GetOutput()
    else:
        vedo.logger.error("in interpolate_data_from(), on must be on points or cells")
        raise RuntimeError()

    locator = vtki.new("PointLocator")
    locator.SetDataSet(points)
    locator.BuildLocator()

    kernel_name = kernel.lower()
    if kernel_name == "shepard":
        kern = vtki.new("ShepardKernel")
        kern.SetPowerParameter(2)
    elif kernel_name == "gaussian":
        kern = vtki.new("GaussianKernel")
        kern.SetSharpness(2)
    elif kernel_name == "linear":
        kern = vtki.new("LinearKernel")
    else:
        vedo.logger.error("available kernels are: [shepard, gaussian, linear]")
        raise RuntimeError()

    # n takes precedence over radius
    if n:
        kern.SetNumberOfPoints(n)
        kern.SetKernelFootprintToNClosest()
    else:
        kern.SetRadius(radius)
        kern.SetKernelFootprintToRadius()

    interpolator = vtki.new("PointInterpolator")
    interpolator.SetInputData(self.dataset)
    interpolator.SetSourceData(points)
    interpolator.SetKernel(kern)
    interpolator.SetLocator(locator)
    interpolator.PassFieldArraysOn()
    interpolator.SetNullPointsStrategy(null_strategy)
    interpolator.SetNullValue(null_value)
    interpolator.SetValidPointsMaskArrayName("ValidPointMask")
    for ex in exclude:
        interpolator.AddExcludedArray(ex)
    interpolator.Update()

    if on == "cells":
        # move the interpolated point data back onto the cells
        p2c = vtki.new("PointDataToCellData")
        p2c.SetInputData(interpolator.GetOutput())
        p2c.Update()
        cpoly = p2c.GetOutput()
    else:
        cpoly = interpolator.GetOutput()

    self._update(cpoly, reset_locators=False)
    self.pipeline = utils.OperationNode("interpolate_data_from", parents=[self, source])
    return self
Interpolate over source to port its data onto the current object using various kernels.
If n (number of closest points to use) is set then radius value is ignored.
Check out also:
probe()
which in many cases can be faster.
Arguments:
- kernel : (str) available kernels are [shepard, gaussian, linear]
null_strategy : (int) specify a strategy to use when encountering a "null" point during the interpolation process. Null points occur when the local neighborhood (of nearby points to interpolate from) is empty.
- Case 0: an output array is created that marks points as being valid (=1) or null (invalid =0), and the null_value is set as well
- Case 1: the output data value(s) are set to the provided null_value
- Case 2: simply use the closest point to perform the interpolation.
- null_value : (float) see above.
Examples:
def add_ids(self) -> Self:
    """
    Generate point and cell ids arrays.

    Two new arrays are added to the mesh: `PointID` and `CellID`.
    """
    idf = vtki.new("IdFilter")
    idf.SetInputData(self.dataset)
    idf.PointIdsOn()
    idf.CellIdsOn()
    idf.FieldDataOff()
    idf.SetPointIdsArrayName("PointID")
    idf.SetCellIdsArrayName("CellID")
    idf.Update()
    self._update(idf.GetOutput(), reset_locators=False)
    self.pipeline = utils.OperationNode("add_ids", parents=[self])
    return self
Generate point and cell ids arrays.
Two new arrays are added to the mesh: PointID
and CellID
.
def gradient(self, input_array=None, on="points", fast=False) -> np.ndarray:
    """
    Compute and return the gradient of the active scalar field as a numpy array.

    Arguments:
        input_array : (str)
            array of the scalars to compute the gradient,
            if None the current active array is selected
        on : (str)
            compute either on 'points' or 'cells' data
        fast : (bool)
            if True, will use a less accurate algorithm
            that performs fewer derivative calculations (and is therefore faster).

    Examples:
        - [isolines.py](https://github.com/marcomusy/vedo/tree/master/examples/advanced/isolines.py)

        ![](https://user-images.githubusercontent.com/32848391/72433087-f00a8780-3798-11ea-9778-991f0abeca70.png)
    """
    # (docstring typo fixed: "gradiend" -> "gradient")
    gra = vtki.new("GradientFilter")
    if on.startswith("p"):
        varr = self.dataset.GetPointData()
        tp = vtki.vtkDataObject.FIELD_ASSOCIATION_POINTS
    elif on.startswith("c"):
        varr = self.dataset.GetCellData()
        tp = vtki.vtkDataObject.FIELD_ASSOCIATION_CELLS
    else:
        vedo.logger.error(f"in gradient: unknown option {on}")
        raise RuntimeError

    if input_array is None:
        if varr.GetScalars():
            input_array = varr.GetScalars().GetName()
        else:
            vedo.logger.error(f"in gradient: no scalars found for {on}")
            raise RuntimeError

    gra.SetInputData(self.dataset)
    gra.SetInputScalars(tp, input_array)
    gra.SetResultArrayName("Gradient")
    gra.SetFasterApproximation(fast)
    gra.ComputeDivergenceOff()
    gra.ComputeVorticityOff()
    gra.ComputeGradientOn()
    gra.Update()
    if on.startswith("p"):
        gvecs = utils.vtk2numpy(gra.GetOutput().GetPointData().GetArray("Gradient"))
    else:
        gvecs = utils.vtk2numpy(gra.GetOutput().GetCellData().GetArray("Gradient"))
    return gvecs
Compute and return the gradient of the active scalar field as a numpy array.
Arguments:
- input_array : (str) array of the scalars to compute the gradient, if None the current active array is selected
- on : (str) compute either on 'points' or 'cells' data
- fast : (bool) if True, will use a less accurate algorithm that performs fewer derivative calculations (and is therefore faster).
Examples:
def divergence(self, array_name=None, on="points", fast=False) -> np.ndarray:
    """
    Compute and return the divergence of a vector field as a numpy array.

    Arguments:
        array_name : (str)
            name of the array of vectors to compute the divergence,
            if None the current active array is selected
        on : (str)
            compute either on 'points' or 'cells' data
        fast : (bool)
            if True, will use a less accurate algorithm
            that performs fewer derivative calculations (and is therefore faster).
    """
    gf = vtki.new("GradientFilter")
    if on.startswith("p"):
        varr = self.dataset.GetPointData()
        tp = vtki.vtkDataObject.FIELD_ASSOCIATION_POINTS
    elif on.startswith("c"):
        varr = self.dataset.GetCellData()
        tp = vtki.vtkDataObject.FIELD_ASSOCIATION_CELLS
    else:
        vedo.logger.error(f"in divergence(): unknown option {on}")
        raise RuntimeError

    if array_name is None:
        vectors = varr.GetVectors()
        if not vectors:
            vedo.logger.error(f"in divergence(): no vectors found for {on}")
            raise RuntimeError
        array_name = vectors.GetName()

    gf.SetInputData(self.dataset)
    gf.SetInputScalars(tp, array_name)
    gf.ComputeDivergenceOn()
    gf.ComputeGradientOff()
    gf.ComputeVorticityOff()
    gf.SetDivergenceArrayName("Divergence")
    gf.SetFasterApproximation(fast)
    gf.Update()
    out = gf.GetOutput()
    data = out.GetPointData() if on.startswith("p") else out.GetCellData()
    return utils.vtk2numpy(data.GetArray("Divergence"))
Compute and return the divergence of a vector field as a numpy array.
Arguments:
- array_name : (str) name of the array of vectors to compute the divergence, if None the current active array is selected
- on : (str) compute either on 'points' or 'cells' data
- fast : (bool) if True, will use a less accurate algorithm that performs fewer derivative calculations (and is therefore faster).
def vorticity(self, array_name=None, on="points", fast=False) -> np.ndarray:
    """
    Compute and return the vorticity of a vector field as a numpy array.

    Arguments:
        array_name : (str)
            name of the array to compute the vorticity,
            if None the current active array is selected
        on : (str)
            compute either on 'points' or 'cells' data
        fast : (bool)
            if True, will use a less accurate algorithm
            that performs fewer derivative calculations (and is therefore faster).
    """
    gf = vtki.new("GradientFilter")
    if on.startswith("p"):
        varr = self.dataset.GetPointData()
        tp = vtki.vtkDataObject.FIELD_ASSOCIATION_POINTS
    elif on.startswith("c"):
        varr = self.dataset.GetCellData()
        tp = vtki.vtkDataObject.FIELD_ASSOCIATION_CELLS
    else:
        vedo.logger.error(f"in vorticity(): unknown option {on}")
        raise RuntimeError

    if array_name is None:
        vectors = varr.GetVectors()
        if not vectors:
            vedo.logger.error(f"in vorticity(): no vectors found for {on}")
            raise RuntimeError
        array_name = vectors.GetName()

    gf.SetInputData(self.dataset)
    gf.SetInputScalars(tp, array_name)
    gf.ComputeDivergenceOff()
    gf.ComputeGradientOff()
    gf.ComputeVorticityOn()
    gf.SetVorticityArrayName("Vorticity")
    gf.SetFasterApproximation(fast)
    gf.Update()
    out = gf.GetOutput()
    data = out.GetPointData() if on.startswith("p") else out.GetCellData()
    return utils.vtk2numpy(data.GetArray("Vorticity"))
Compute and return the vorticity of a vector field as a numpy array.
Arguments:
- array_name : (str) name of the array to compute the vorticity, if None the current active array is selected
- on : (str) compute either on 'points' or 'cells' data
- fast : (bool) if True, will use a less accurate algorithm that performs fewer derivative calculations (and is therefore faster).
def probe(self, source) -> Self:
    """
    Takes a data set and probes its scalars at the specified points in space.

    Note that a mask is also output with valid/invalid points which can be accessed
    with `mesh.pointdata['ValidPointMask']`.

    Check out also:
        `interpolate_data_from()`

    Examples:
        - [probe_points.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/probe_points.py)

        ![](https://vedo.embl.es/images/volumetric/probePoints.png)
    """
    pf = vtki.new("ProbeFilter")
    pf.SetSourceData(source.dataset)
    pf.SetInputData(self.dataset)
    pf.Update()
    self._update(pf.GetOutput(), reset_locators=False)
    self.pipeline = utils.OperationNode("probe", parents=[self, source])
    # expose the validity mask under a friendlier name
    self.pointdata.rename("vtkValidPointMask", "ValidPointMask")
    return self
Takes a data set and probes its scalars at the specified points in space.
Note that a mask is also output with valid/invalid points which can be accessed
with mesh.pointdata['ValidPointMask']
.
Check out also:
Examples:
def compute_cell_size(self) -> Self:
    """
    Add to this object a cell data array
    containing the area, volume and edge length
    of the cells (when applicable to the object type).

    Array names are: `Area`, `Volume`, `Length`.
    """
    csf = vtki.new("CellSizeFilter")
    csf.SetInputData(self.dataset)
    # enable every metric except the vertex count
    csf.SetAreaArrayName("Area")
    csf.SetComputeArea(1)
    csf.SetVolumeArrayName("Volume")
    csf.SetComputeVolume(1)
    csf.SetLengthArrayName("Length")
    csf.SetComputeLength(1)
    csf.SetComputeVertexCount(0)
    csf.Update()
    self._update(csf.GetOutput(), reset_locators=False)
    return self
Add to this object a cell data array containing the area, volume and edge length of the cells (when applicable to the object type).
Array names are: Area
, Volume
, Length
.
1302 def generate_random_data(self) -> Self: 1303 """Fill a dataset with random attributes""" 1304 gen = vtki.new("RandomAttributeGenerator") 1305 gen.SetInputData(self.dataset) 1306 gen.GenerateAllDataOn() 1307 gen.SetDataTypeToFloat() 1308 gen.GeneratePointNormalsOff() 1309 gen.GeneratePointTensorsOn() 1310 gen.GenerateCellScalarsOn() 1311 gen.Update() 1312 self._update(gen.GetOutput(), reset_locators=False) 1313 self.pipeline = utils.OperationNode("generate_random_data", parents=[self]) 1314 return self
Fill a dataset with random attributes
def integrate_data(self) -> dict:
    """
    Integrate point and cell data arrays while computing length,
    area or volume of the domain. It works for 1D, 2D or 3D cells.

    For volumetric datasets, this filter ignores all but 3D cells.
    It will not compute the volume contained in a closed surface.

    Returns a dictionary with keys: `pointdata`, `celldata`, `metadata`,
    which contain the integration result for the corresponding attributes.

    Examples:
        ```python
        from vedo import *
        surf = Sphere(res=100)
        surf.pointdata['scalars'] = np.ones(surf.npoints)
        data = surf.integrate_data()
        print(data['pointdata']['scalars'], "is equal to 4pi", 4*np.pi)
        ```

        ```python
        from vedo import *

        xcoords1 = np.arange(0, 2.2, 0.2)
        xcoords2 = sqrt(np.arange(0, 4.2, 0.2))

        ycoords = np.arange(0, 1.2, 0.2)

        surf1 = Grid(s=(xcoords1, ycoords)).rotate_y(-45).lw(2)
        surf2 = Grid(s=(xcoords2, ycoords)).rotate_y(-45).lw(2)

        surf1.pointdata['scalars'] = surf1.vertices[:,2]
        surf2.pointdata['scalars'] = surf2.vertices[:,2]

        data1 = surf1.integrate_data()
        data2 = surf2.integrate_data()

        print(data1['pointdata']['scalars'],
            "is equal to",
            data2['pointdata']['scalars'],
            "even if the grids are different!",
            "(= the volume under the surface)"
        )
        show(surf1, surf2, N=2, axes=1).close()
        ```
    """
    vinteg = vtki.new("IntegrateAttributes")
    vinteg.SetInputData(self.dataset)
    vinteg.Update()
    # wrap the result so the DataArrayHelper dict views are available
    ugrid = vedo.UnstructuredGrid(vinteg.GetOutput())
    return dict(
        pointdata=ugrid.pointdata.todict(),
        celldata=ugrid.celldata.todict(),
        metadata=ugrid.metadata.todict(),
    )
Integrate point and cell data arrays while computing length, area or volume of the domain. It works for 1D, 2D or 3D cells.
For volumetric datasets, this filter ignores all but 3D cells. It will not compute the volume contained in a closed surface.
Returns a dictionary with keys: pointdata
, celldata
, metadata
,
which contain the integration result for the corresponding attributes.
Examples:
from vedo import * surf = Sphere(res=100) surf.pointdata['scalars'] = np.ones(surf.npoints) data = surf.integrate_data() print(data['pointdata']['scalars'], "is equal to 4pi", 4*np.pi)
from vedo import * xcoords1 = np.arange(0, 2.2, 0.2) xcoords2 = sqrt(np.arange(0, 4.2, 0.2)) ycoords = np.arange(0, 1.2, 0.2) surf1 = Grid(s=(xcoords1, ycoords)).rotate_y(-45).lw(2) surf2 = Grid(s=(xcoords2, ycoords)).rotate_y(-45).lw(2) surf1.pointdata['scalars'] = surf1.vertices[:,2] surf2.pointdata['scalars'] = surf2.vertices[:,2] data1 = surf1.integrate_data() data2 = surf2.integrate_data() print(data1['pointdata']['scalars'], "is equal to", data2['pointdata']['scalars'], "even if the grids are different!", "(= the volume under the surface)" ) show(surf1, surf2, N=2, axes=1).close()
def write(self, filename, binary=True) -> None:
    """Write object to file, delegating format handling to `vedo.file_io.write`."""
    written = vedo.file_io.write(self, filename, binary)
    written.pipeline = utils.OperationNode(
        "write",
        parents=[self],
        comment=filename[:15],
        shape="folder",
        c="#8a817c",
    )
Write object to file.
def tomesh(self, bounds=(), shrink=0) -> "vedo.Mesh":
    """
    Extract boundary geometry from dataset (or convert data to polygonal type).

    Two new arrays are added to the mesh: `OriginalCellIds` and `OriginalPointIds`
    to keep track of the original mesh elements.

    Arguments:
        bounds : (list)
            specify a sub-region to extract
        shrink : (float)
            shrink the cells to a fraction of their original size
    """
    geo = vtki.new("GeometryFilter")

    if shrink:
        # Shrink each cell towards its centroid before extracting geometry.
        sf = vtki.new("ShrinkFilter")
        sf.SetInputData(self.dataset)
        sf.SetShrinkFactor(shrink)
        sf.Update()
        geo.SetInputData(sf.GetOutput())
    else:
        geo.SetInputData(self.dataset)

    # Record, in the output, the ids of the input cells/points each
    # output element came from.
    geo.SetPassThroughCellIds(1)
    geo.SetPassThroughPointIds(1)
    geo.SetOriginalCellIdsName("OriginalCellIds")
    geo.SetOriginalPointIdsName("OriginalPointIds")
    geo.SetNonlinearSubdivisionLevel(1)
    # geo.MergingOff() # crashes on StructuredGrids
    if bounds:
        # restrict the extraction to the requested bounding box
        geo.SetExtent(bounds)
        geo.ExtentClippingOn()
    geo.Update()
    msh = vedo.mesh.Mesh(geo.GetOutput())
    msh.pipeline = utils.OperationNode("tomesh", parents=[self], c="#9e2a2b")
    return msh
Extract boundary geometry from dataset (or convert data to polygonal type).
Two new arrays are added to the mesh: `OriginalCellIds` and `OriginalPointIds`, to keep track of the original mesh elements.
Arguments:
- bounds : (list) specify a sub-region to extract
- shrink : (float) shrink the cells to a fraction of their original size
def signed_distance(self, dims=(20, 20, 20), bounds=None, invert=False, max_radius=None) -> "vedo.Volume":
    """
    Compute the `Volume` object whose voxels contains the signed distance from
    the object. The calling object must have "Normals" defined.

    Arguments:
        bounds : (list, actor)
            bounding box sizes
        dims : (list)
            dimensions (nr. of voxels) of the output volume.
        invert : (bool)
            flip the sign
        max_radius : (float)
            specify how far out to propagate distance calculation

    Examples:
        - [distance2mesh.py](https://github.com/marcomusy/vedo/blob/master/examples/basic/distance2mesh.py)
    """
    if bounds is None:
        bounds = self.bounds()
    if max_radius is None:
        # default: half the diagonal of the object's bounding box
        max_radius = self.diagonal_size() / 2
    dist = vtki.new("SignedDistance")
    dist.SetInputData(self.dataset)
    dist.SetRadius(max_radius)
    dist.SetBounds(bounds)
    dist.SetDimensions(dims)
    dist.Update()
    img = dist.GetOutput()
    if invert:
        # flip the sign by multiplying every voxel by -1
        mat = vtki.new("ImageMathematics")
        mat.SetInput1Data(img)
        mat.SetOperationToMultiplyByK()
        mat.SetConstantK(-1)
        mat.Update()
        img = mat.GetOutput()

    vol = vedo.Volume(img)
    vol.name = "SignedDistanceVolume"
    vol.pipeline = utils.OperationNode(
        "signed_distance",
        parents=[self],
        comment=f"dims={tuple(vol.dimensions())}",
        c="#e9c46a:#0096c7",
    )
    return vol
Compute the `Volume` object whose voxels contain the signed distance from the object. The calling object must have "Normals" defined.
Arguments:
- bounds : (list, actor) bounding box sizes
- dims : (list) dimensions (nr. of voxels) of the output volume.
- invert : (bool) flip the sign
- max_radius : (float) specify how far out to propagate distance calculation
Examples:
def unsigned_distance(
        self, dims=(25,25,25), bounds=(), max_radius=0, cap_value=0) -> "vedo.Volume":
    """
    Compute the `Volume` object whose voxels contains the unsigned distance.

    Arguments:
        dims : (list)
            dimensions (nr. of voxels) of the output volume.
        bounds : (list)
            bounding box of the output volume; if not 6 values are given,
            the object's own bounds are used.
        max_radius : (float)
            how far out to propagate the distance calculation;
            defaults to 1/10 of the object's diagonal size.
        cap_value : (float)
            value assigned to voxels beyond `max_radius`;
            pass None to disable capping.
    """
    dist = vtki.new("UnsignedDistance")
    dist.SetInputData(self.dataset)
    dist.SetDimensions(dims)

    if len(bounds) == 6:
        dist.SetBounds(bounds)
    # elif bounds == "auto":
    #     dist.AdjustBoundsOn()
    else:
        dist.SetBounds(self.bounds())
    if not max_radius:
        # default propagation radius: 1/10 of the bounding-box diagonal
        max_radius = self.diagonal_size() / 10
    dist.SetRadius(max_radius)

    # reuse an existing point locator if one was already built
    if self.point_locator:
        dist.SetLocator(self.point_locator)

    # note: the default cap_value=0 enables capping
    if cap_value is not None:
        dist.CappingOn()
        dist.SetCapValue(cap_value)
    dist.SetOutputScalarTypeToFloat()
    dist.Update()
    vol = vedo.Volume(dist.GetOutput())
    vol.name = "UnsignedDistanceVolume"
    vol.pipeline = utils.OperationNode(
        "unsigned_distance", parents=[self], c="#e9c46a:#0096c7")
    return vol
Compute the `Volume` object whose voxels contain the unsigned distance.
def smooth_data(self,
        niter=10, relaxation_factor=0.1, strategy=0, mask=None,
        exclude=("Normals", "TextureCoordinates"),
    ) -> Self:
    """
    Smooth point attribute data using distance weighted Laplacian kernel.

    The effect is to blur regions of high variation and emphasize low variation regions.

    Arguments:
        niter : (int)
            number of iterations
        relaxation_factor : (float)
            relaxation factor controlling the amount of Laplacian smoothing applied
        strategy : (int)
            strategy to use for Laplacian smoothing
            - 0: use all points, all point data attributes are smoothed
            - 1: smooth all point attribute data except those on the boundary
            - 2: only point data connected to a boundary point are smoothed
        mask : (str, np.ndarray)
            array to be used as a mask (ignore then the strategy keyword)
        exclude : (list)
            list of arrays to be excluded from smoothing
    """
    try:
        saf = vtki.new("AttributeSmoothingFilter")
    except Exception:  # the filter only exists in VTK >= 9.3.0
        # was a bare `except:` (would also swallow KeyboardInterrupt);
        # also fixed typo "avaialble" in the message
        vedo.logger.error("smooth_data() only available in VTK>=9.3.0")
        return self
    saf.SetInputData(self.dataset)
    saf.SetRelaxationFactor(relaxation_factor)
    saf.SetNumberOfIterations(niter)

    for ex in exclude:
        saf.AddExcludedArray(ex)

    # weight neighbor contributions by squared distance
    saf.SetWeightsTypeToDistance2()

    saf.SetSmoothingStrategy(strategy)
    if mask is not None:
        # an explicit mask overrides the strategy keyword
        saf.SetSmoothingStrategyToSmoothingMask()
        if isinstance(mask, str):
            mask_ = self.dataset.GetPointData().GetArray(mask)
            if not mask_:
                vedo.logger.error(f"smooth_data(): mask array {mask} not found")
                return self
            mask_array = vtki.vtkUnsignedCharArray()
            mask_array.ShallowCopy(mask_)
            mask_array.SetName(mask_.GetName())
        else:
            mask_array = utils.numpy2vtk(mask, dtype=np.uint8)
        saf.SetSmoothingMask(mask_array)

    saf.Update()

    self._update(saf.GetOutput())
    self.pipeline = utils.OperationNode(
        "smooth_data", comment=f"strategy {strategy}", parents=[self], c="#9e2a2b"
    )
    return self
Smooth point attribute data using distance weighted Laplacian kernel.
The effect is to blur regions of high variation and emphasize low variation regions.
Arguments:
- niter : (int) number of iterations
- relaxation_factor : (float) relaxation factor controlling the amount of Laplacian smoothing applied
- strategy : (int) strategy to use for Laplacian smoothing - 0: use all points, all point data attributes are smoothed - 1: smooth all point attribute data except those on the boundary - 2: only point data connected to a boundary point are smoothed
- mask : (str, np.ndarray) array to be used as a mask (ignore then the strategy keyword)
- exclude : (list) list of arrays to be excluded from smoothing
def compute_streamlines(
        self,
        seeds: Any,
        integrator="rk4",
        direction="forward",
        initial_step_size=None,
        max_propagation=None,
        max_steps=10000,
        step_length=0,
        surface_constrained=False,
        compute_vorticity=False,
    ) -> Union["vedo.Lines", None]:
    """
    Integrate a vector field to generate streamlines.

    Arguments:
        seeds : (Mesh, Points, list)
            starting points of the streamlines
        integrator : (str)
            type of integration method to be used:
            - "rk2" (Runge-Kutta 2)
            - "rk4" (Runge-Kutta 4)
            - "rk45" (Runge-Kutta 45)
        direction : (str)
            direction of integration, either "forward", "backward" or "both"
        initial_step_size : (float)
            initial step size used for line integration
        max_propagation : (float)
            maximum length of a streamline expressed in absolute units
        max_steps : (int)
            maximum number of steps for a streamline
        step_length : (float)
            maximum length of a step expressed in absolute units
        surface_constrained : (bool)
            whether to stop integrating when the streamline leaves the surface
        compute_vorticity : (bool)
            whether to compute the vorticity at each streamline point

    Returns None (and logs an error) for an unknown `direction` or `integrator`.
    """
    # derive sensible defaults from the average extent of the bounding box
    b = self.dataset.GetBounds()
    size = (b[5]-b[4] + b[3]-b[2] + b[1]-b[0]) / 3
    if initial_step_size is None:
        initial_step_size = size / 1000.0

    if max_propagation is None:
        max_propagation = size * 2

    # a raw list of coordinates is accepted as seed points
    if utils.is_sequence(seeds):
        seeds = vedo.Points(seeds)

    sti = vtki.new("StreamTracer")
    sti.SetSourceData(seeds.dataset)
    if isinstance(self, vedo.RectilinearGrid):
        # StreamTracer needs an unstructured grid in this case
        sti.SetInputData(vedo.UnstructuredGrid(self.dataset).dataset)
    else:
        sti.SetInputDataObject(self.dataset)

    sti.SetInitialIntegrationStep(initial_step_size)
    sti.SetComputeVorticity(compute_vorticity)
    sti.SetMaximumNumberOfSteps(max_steps)
    sti.SetMaximumPropagation(max_propagation)
    sti.SetSurfaceStreamlines(surface_constrained)
    if step_length:
        sti.SetMaximumIntegrationStep(step_length)

    # substring match allows e.g. "forwards"/"backwards" spellings
    if "for" in direction:
        sti.SetIntegrationDirectionToForward()
    elif "back" in direction:
        sti.SetIntegrationDirectionToBackward()
    elif "both" in direction:
        sti.SetIntegrationDirectionToBoth()
    else:
        vedo.logger.error(f"in compute_streamlines(), unknown direction {direction}")
        return None

    if integrator == "rk2":
        sti.SetIntegratorTypeToRungeKutta2()
    elif integrator == "rk4":
        sti.SetIntegratorTypeToRungeKutta4()
    elif integrator == "rk45":
        sti.SetIntegratorTypeToRungeKutta45()
    else:
        vedo.logger.error(f"in compute_streamlines(), unknown integrator {integrator}")
        return None

    sti.Update()

    stlines = vedo.shapes.Lines(sti.GetOutput(), lw=4)
    stlines.name = "StreamLines"
    self.pipeline = utils.OperationNode(
        "compute_streamlines", comment=f"{integrator}", parents=[self, seeds], c="#9e2a2b"
    )
    return stlines
Integrate a vector field to generate streamlines.
Arguments:
- seeds : (Mesh, Points, list) starting points of the streamlines
- integrator : (str) type of integration method to be used: - "rk2" (Runge-Kutta 2) - "rk4" (Runge-Kutta 4) - "rk45" (Runge-Kutta 45)
- direction : (str) direction of integration, either "forward", "backward" or "both"
- initial_step_size : (float) initial step size used for line integration
- max_propagation : (float) maximum length of a streamline expressed in absolute units
- max_steps : (int) maximum number of steps for a streamline
- step_length : (float) maximum length of a step expressed in absolute units
- surface_constrained : (bool) whether to stop integrating when the streamline leaves the surface
- compute_vorticity : (bool) whether to compute the vorticity at each streamline point
1655class PointAlgorithms(CommonAlgorithms): 1656 """Methods for point clouds.""" 1657 1658 def apply_transform(self, LT: Any, deep_copy=True) -> Self: 1659 """ 1660 Apply a linear or non-linear transformation to the mesh polygonal data. 1661 1662 Example: 1663 ```python 1664 from vedo import Cube, show, settings 1665 settings.use_parallel_projection = True 1666 c1 = Cube().rotate_z(25).pos(2,1).mirror().alpha(0.5) 1667 T = c1.transform # rotate by 5 degrees, place at (2,1) 1668 c2 = Cube().c('red4').wireframe().lw(10).lighting('off') 1669 c2.apply_transform(T) 1670 show(c1, c2, "The 2 cubes should overlap!", axes=1).close() 1671 ``` 1672 1673 ![](https://vedo.embl.es/images/feats/apply_transform.png) 1674 """ 1675 if self.dataset.GetNumberOfPoints() == 0: 1676 return self 1677 1678 if isinstance(LT, LinearTransform): 1679 LT_is_linear = True 1680 tr = LT.T 1681 if LT.is_identity(): 1682 return self 1683 1684 elif isinstance(LT, (vtki.vtkMatrix4x4, vtki.vtkLinearTransform)) or utils.is_sequence(LT): 1685 LT_is_linear = True 1686 LT = LinearTransform(LT) 1687 tr = LT.T 1688 if LT.is_identity(): 1689 return self 1690 1691 elif isinstance(LT, NonLinearTransform): 1692 LT_is_linear = False 1693 tr = LT.T 1694 self.transform = LT # reset 1695 1696 elif isinstance(LT, vtki.vtkThinPlateSplineTransform): 1697 LT_is_linear = False 1698 tr = LT 1699 self.transform = NonLinearTransform(LT) # reset 1700 1701 else: 1702 vedo.logger.error(f"apply_transform(), unknown input type:\n{LT}") 1703 return self 1704 1705 ################ 1706 if LT_is_linear: 1707 try: 1708 # self.transform might still not be linear 1709 self.transform.concatenate(LT) 1710 except AttributeError: 1711 # in that case reset it 1712 self.transform = LinearTransform() 1713 1714 ################ 1715 if isinstance(self.dataset, vtki.vtkPolyData): 1716 tp = vtki.new("TransformPolyDataFilter") 1717 elif isinstance(self.dataset, vtki.vtkUnstructuredGrid): 1718 tp = vtki.new("TransformFilter") 1719 
tp.TransformAllInputVectorsOn() 1720 # elif isinstance(self.dataset, vtki.vtkImageData): 1721 # tp = vtki.new("ImageReslice") 1722 # tp.SetInterpolationModeToCubic() 1723 # tp.SetResliceTransform(tr) 1724 else: 1725 vedo.logger.error(f"apply_transform(), unknown input type: {[self.dataset]}") 1726 return self 1727 1728 tp.SetTransform(tr) 1729 tp.SetInputData(self.dataset) 1730 tp.Update() 1731 out = tp.GetOutput() 1732 1733 if deep_copy: 1734 self.dataset.DeepCopy(out) 1735 else: 1736 self.dataset.ShallowCopy(out) 1737 1738 # reset the locators 1739 self.point_locator = None 1740 self.cell_locator = None 1741 self.line_locator = None 1742 return self 1743 1744 def apply_transform_from_actor(self) -> LinearTransform: 1745 """ 1746 Apply the current transformation of the actor to the data. 1747 Useful when manually moving an actor (eg. when pressing "a"). 1748 Returns the `LinearTransform` object. 1749 1750 Note that this method is automatically called when the window is closed, 1751 or the interactor style is changed. 
1752 """ 1753 M = self.actor.GetMatrix() 1754 self.apply_transform(M) 1755 iden = vtki.vtkMatrix4x4() 1756 self.actor.PokeMatrix(iden) 1757 return LinearTransform(M) 1758 1759 def pos(self, x=None, y=None, z=None) -> Self: 1760 """Set/Get object position.""" 1761 if x is None: # get functionality 1762 return self.transform.position 1763 1764 if z is None and y is None: # assume x is of the form (x,y,z) 1765 if len(x) == 3: 1766 x, y, z = x 1767 else: 1768 x, y = x 1769 z = 0 1770 elif z is None: # assume x,y is of the form x, y 1771 z = 0 1772 1773 q = self.transform.position 1774 delta = [x, y, z] - q 1775 if delta[0] == delta[1] == delta[2] == 0: 1776 return self 1777 LT = LinearTransform().translate(delta) 1778 return self.apply_transform(LT) 1779 1780 def shift(self, dx=0, dy=0, dz=0) -> Self: 1781 """Add a vector to the current object position.""" 1782 if utils.is_sequence(dx): 1783 dx, dy, dz = utils.make3d(dx) 1784 if dx == dy == dz == 0: 1785 return self 1786 LT = LinearTransform().translate([dx, dy, dz]) 1787 return self.apply_transform(LT) 1788 1789 def x(self, val=None) -> Self: 1790 """Set/Get object position along x axis.""" 1791 p = self.transform.position 1792 if val is None: 1793 return p[0] 1794 self.pos(val, p[1], p[2]) 1795 return self 1796 1797 def y(self, val=None)-> Self: 1798 """Set/Get object position along y axis.""" 1799 p = self.transform.position 1800 if val is None: 1801 return p[1] 1802 self.pos(p[0], val, p[2]) 1803 return self 1804 1805 def z(self, val=None) -> Self: 1806 """Set/Get object position along z axis.""" 1807 p = self.transform.position 1808 if val is None: 1809 return p[2] 1810 self.pos(p[0], p[1], val) 1811 return self 1812 1813 def rotate(self, angle: float, axis=(1, 0, 0), point=(0, 0, 0), rad=False) -> Self: 1814 """ 1815 Rotate around an arbitrary `axis` passing through `point`. 
1816 1817 Example: 1818 ```python 1819 from vedo import * 1820 c1 = Cube() 1821 c2 = c1.clone().c('violet').alpha(0.5) # copy of c1 1822 v = vector(0.2,1,0) 1823 p = vector(1,0,0) # axis passes through this point 1824 c2.rotate(90, axis=v, point=p) 1825 l = Line(-v+p, v+p).lw(3).c('red') 1826 show(c1, l, c2, axes=1).close() 1827 ``` 1828 1829 ![](https://vedo.embl.es/images/feats/rotate_axis.png) 1830 """ 1831 LT = LinearTransform() 1832 LT.rotate(angle, axis, point, rad) 1833 return self.apply_transform(LT) 1834 1835 def rotate_x(self, angle: float, rad=False, around=None) -> Self: 1836 """ 1837 Rotate around x-axis. If angle is in radians set `rad=True`. 1838 1839 Use `around` to define a pivoting point. 1840 """ 1841 if angle == 0: 1842 return self 1843 LT = LinearTransform().rotate_x(angle, rad, around) 1844 return self.apply_transform(LT) 1845 1846 def rotate_y(self, angle: float, rad=False, around=None) -> Self: 1847 """ 1848 Rotate around y-axis. If angle is in radians set `rad=True`. 1849 1850 Use `around` to define a pivoting point. 1851 """ 1852 if angle == 0: 1853 return self 1854 LT = LinearTransform().rotate_y(angle, rad, around) 1855 return self.apply_transform(LT) 1856 1857 def rotate_z(self, angle: float, rad=False, around=None) -> Self: 1858 """ 1859 Rotate around z-axis. If angle is in radians set `rad=True`. 1860 1861 Use `around` to define a pivoting point. 1862 """ 1863 if angle == 0: 1864 return self 1865 LT = LinearTransform().rotate_z(angle, rad, around) 1866 return self.apply_transform(LT) 1867 1868 def reorient(self, initaxis, newaxis, rotation=0, rad=False, xyplane=False) -> Self: 1869 """ 1870 Reorient the object to point to a new direction from an initial one. 1871 If `initaxis` is None, the object will be assumed in its "default" orientation. 1872 If `xyplane` is True, the object will be rotated to lie on the xy plane. 1873 1874 Use `rotation` to first rotate the object around its `initaxis`. 
1875 """ 1876 q = self.transform.position 1877 LT = LinearTransform() 1878 LT.reorient(initaxis, newaxis, q, rotation, rad, xyplane) 1879 return self.apply_transform(LT) 1880 1881 def scale(self, s=None, reset=False, origin=True) -> Union[Self, np.array]: 1882 """ 1883 Set/get object's scaling factor. 1884 1885 Arguments: 1886 s : (list, float) 1887 scaling factor(s). 1888 reset : (bool) 1889 if True previous scaling factors are ignored. 1890 origin : (bool) 1891 if True scaling is applied with respect to object's position, 1892 otherwise is applied respect to (0,0,0). 1893 1894 Note: 1895 use `s=(sx,sy,sz)` to scale differently in the three coordinates. 1896 """ 1897 if s is None: 1898 return np.array(self.transform.T.GetScale()) 1899 1900 if not utils.is_sequence(s): 1901 s = [s, s, s] 1902 1903 LT = LinearTransform() 1904 if reset: 1905 old_s = np.array(self.transform.T.GetScale()) 1906 LT.scale(s / old_s) 1907 else: 1908 if origin is True: 1909 LT.scale(s, origin=self.transform.position) 1910 elif origin is False: 1911 LT.scale(s, origin=False) 1912 else: 1913 LT.scale(s, origin=origin) 1914 1915 return self.apply_transform(LT)
Methods for point clouds.
def apply_transform(self, LT: Any, deep_copy=True) -> Self:
    """
    Apply a linear or non-linear transformation to the mesh polygonal data.

    Arguments:
        LT : (LinearTransform, NonLinearTransform, vtkMatrix4x4,
              vtkLinearTransform, vtkThinPlateSplineTransform, sequence)
            the transformation to apply
        deep_copy : (bool)
            if True the transformed points are deep-copied back into
            the dataset, otherwise a shallow copy is used

    Example:
        ```python
        from vedo import Cube, show, settings
        settings.use_parallel_projection = True
        c1 = Cube().rotate_z(25).pos(2,1).mirror().alpha(0.5)
        T = c1.transform  # rotate by 5 degrees, place at (2,1)
        c2 = Cube().c('red4').wireframe().lw(10).lighting('off')
        c2.apply_transform(T)
        show(c1, c2, "The 2 cubes should overlap!", axes=1).close()
        ```

    ![](https://vedo.embl.es/images/feats/apply_transform.png)
    """
    if self.dataset.GetNumberOfPoints() == 0:
        return self  # nothing to transform

    # Normalize the many accepted input types into:
    #   tr           -> the underlying VTK transform to run
    #   LT_is_linear -> whether it can be concatenated to self.transform
    if isinstance(LT, LinearTransform):
        LT_is_linear = True
        tr = LT.T
        if LT.is_identity():
            return self  # identity: no-op

    elif isinstance(LT, (vtki.vtkMatrix4x4, vtki.vtkLinearTransform)) or utils.is_sequence(LT):
        # raw VTK matrix/transform or a plain matrix-like sequence
        LT_is_linear = True
        LT = LinearTransform(LT)
        tr = LT.T
        if LT.is_identity():
            return self

    elif isinstance(LT, NonLinearTransform):
        LT_is_linear = False
        tr = LT.T
        self.transform = LT  # reset

    elif isinstance(LT, vtki.vtkThinPlateSplineTransform):
        LT_is_linear = False
        tr = LT
        self.transform = NonLinearTransform(LT)  # reset

    else:
        vedo.logger.error(f"apply_transform(), unknown input type:\n{LT}")
        return self

    ################
    if LT_is_linear:
        try:
            # self.transform might still not be linear
            self.transform.concatenate(LT)
        except AttributeError:
            # in that case reset it
            self.transform = LinearTransform()

    ################
    # pick the transform filter matching the dataset type
    if isinstance(self.dataset, vtki.vtkPolyData):
        tp = vtki.new("TransformPolyDataFilter")
    elif isinstance(self.dataset, vtki.vtkUnstructuredGrid):
        tp = vtki.new("TransformFilter")
        tp.TransformAllInputVectorsOn()
    # elif isinstance(self.dataset, vtki.vtkImageData):
    #     tp = vtki.new("ImageReslice")
    #     tp.SetInterpolationModeToCubic()
    #     tp.SetResliceTransform(tr)
    else:
        vedo.logger.error(f"apply_transform(), unknown input type: {[self.dataset]}")
        return self

    tp.SetTransform(tr)
    tp.SetInputData(self.dataset)
    tp.Update()
    out = tp.GetOutput()

    if deep_copy:
        self.dataset.DeepCopy(out)
    else:
        self.dataset.ShallowCopy(out)

    # reset the locators: cached spatial structures are now stale
    self.point_locator = None
    self.cell_locator = None
    self.line_locator = None
    return self
Apply a linear or non-linear transformation to the mesh polygonal data.
Example:
from vedo import Cube, show, settings
settings.use_parallel_projection = True
c1 = Cube().rotate_z(25).pos(2,1).mirror().alpha(0.5)
T = c1.transform # rotate by 5 degrees, place at (2,1)
c2 = Cube().c('red4').wireframe().lw(10).lighting('off')
c2.apply_transform(T)
show(c1, c2, "The 2 cubes should overlap!", axes=1).close()
def apply_transform_from_actor(self) -> LinearTransform:
    """
    Apply the current transformation of the actor to the data.
    Useful when manually moving an actor (eg. when pressing "a").
    Returns the `LinearTransform` object.

    Note that this method is automatically called when the window is closed,
    or the interactor style is changed.
    """
    actor_matrix = self.actor.GetMatrix()
    self.apply_transform(actor_matrix)
    # reset the actor's own matrix to the identity, since the
    # transformation has now been baked into the data itself
    self.actor.PokeMatrix(vtki.vtkMatrix4x4())
    return LinearTransform(actor_matrix)
Apply the current transformation of the actor to the data.
Useful when manually moving an actor (eg. when pressing "a").
Returns the `LinearTransform` object.
Note that this method is automatically called when the window is closed, or the interactor style is changed.
def pos(self, x=None, y=None, z=None) -> Self:
    """Set/Get object position."""
    if x is None:
        # getter: report the current position
        return self.transform.position

    if y is None and z is None:
        # a single sequence was given: unpack it
        if len(x) == 3:
            x, y, z = x
        else:
            x, y = x
            z = 0
    elif z is None:
        # only (x, y) were given
        z = 0

    current = self.transform.position
    delta = [x, y, z] - current
    if delta[0] == delta[1] == delta[2] == 0:
        return self  # already there
    translation = LinearTransform().translate(delta)
    return self.apply_transform(translation)
Set/Get object position.
def shift(self, dx=0, dy=0, dz=0) -> Self:
    """Add a vector to the current object position."""
    if utils.is_sequence(dx):
        # a single sequence was passed: split it into components
        dx, dy, dz = utils.make3d(dx)
    if dx == dy == dz == 0:
        return self  # zero shift: no-op
    translation = LinearTransform().translate([dx, dy, dz])
    return self.apply_transform(translation)
Add a vector to the current object position.
def x(self, val=None) -> Union[Self, float]:
    """Set/Get object position along x axis.

    With no argument, returns the current x coordinate (a float);
    otherwise moves the object and returns self.
    (Annotation fixed: the original claimed `-> Self` but the getter
    branch returns a scalar.)
    """
    p = self.transform.position
    if val is None:
        return p[0]
    self.pos(val, p[1], p[2])
    return self
Set/Get object position along x axis.
def y(self, val=None) -> Union[Self, float]:
    """Set/Get object position along y axis.

    With no argument, returns the current y coordinate (a float);
    otherwise moves the object and returns self.
    (Annotation fixed: the original claimed `-> Self` but the getter
    branch returns a scalar.)
    """
    p = self.transform.position
    if val is None:
        return p[1]
    self.pos(p[0], val, p[2])
    return self
Set/Get object position along y axis.
def z(self, val=None) -> Union[Self, float]:
    """Set/Get object position along z axis.

    With no argument, returns the current z coordinate (a float);
    otherwise moves the object and returns self.
    (Annotation fixed: the original claimed `-> Self` but the getter
    branch returns a scalar.)
    """
    p = self.transform.position
    if val is None:
        return p[2]
    self.pos(p[0], p[1], val)
    return self
Set/Get object position along z axis.
def rotate(self, angle: float, axis=(1, 0, 0), point=(0, 0, 0), rad=False) -> Self:
    """
    Rotate around an arbitrary `axis` passing through `point`.

    Example:
        ```python
        from vedo import *
        c1 = Cube()
        c2 = c1.clone().c('violet').alpha(0.5) # copy of c1
        v = vector(0.2,1,0)
        p = vector(1,0,0)  # axis passes through this point
        c2.rotate(90, axis=v, point=p)
        l = Line(-v+p, v+p).lw(3).c('red')
        show(c1, l, c2, axes=1).close()
        ```

    ![](https://vedo.embl.es/images/feats/rotate_axis.png)
    """
    rotation = LinearTransform()
    rotation.rotate(angle, axis, point, rad)
    return self.apply_transform(rotation)
Rotate around an arbitrary `axis` passing through `point`.
Example:
from vedo import *
c1 = Cube()
c2 = c1.clone().c('violet').alpha(0.5) # copy of c1
v = vector(0.2,1,0)
p = vector(1,0,0) # axis passes through this point
c2.rotate(90, axis=v, point=p)
l = Line(-v+p, v+p).lw(3).c('red')
show(c1, l, c2, axes=1).close()
def rotate_x(self, angle: float, rad=False, around=None) -> Self:
    """
    Rotate around x-axis. If angle is in radians set `rad=True`.

    Use `around` to define a pivoting point.
    """
    if angle == 0:
        return self  # no rotation requested
    rotation = LinearTransform().rotate_x(angle, rad, around)
    return self.apply_transform(rotation)
Rotate around the x-axis. If the angle is in radians set `rad=True`. Use `around` to define a pivoting point.
def rotate_y(self, angle: float, rad=False, around=None) -> Self:
    """
    Rotate around y-axis. If angle is in radians set `rad=True`.

    Use `around` to define a pivoting point.
    """
    if angle == 0:
        return self  # no rotation requested
    rotation = LinearTransform().rotate_y(angle, rad, around)
    return self.apply_transform(rotation)
Rotate around the y-axis. If the angle is in radians set `rad=True`. Use `around` to define a pivoting point.
def rotate_z(self, angle: float, rad=False, around=None) -> Self:
    """
    Rotate around z-axis. If angle is in radians set `rad=True`.

    Use `around` to define a pivoting point.
    """
    if angle == 0:
        return self  # no rotation requested
    rotation = LinearTransform().rotate_z(angle, rad, around)
    return self.apply_transform(rotation)
Rotate around the z-axis. If the angle is in radians set `rad=True`. Use `around` to define a pivoting point.
def reorient(self, initaxis, newaxis, rotation=0, rad=False, xyplane=False) -> Self:
    """
    Reorient the object to point to a new direction from an initial one.
    If `initaxis` is None, the object will be assumed in its "default" orientation.
    If `xyplane` is True, the object will be rotated to lie on the xy plane.

    Use `rotation` to first rotate the object around its `initaxis`.
    """
    # the reorientation pivots around the object's current position
    position = self.transform.position
    T = LinearTransform()
    T.reorient(initaxis, newaxis, position, rotation, rad, xyplane)
    return self.apply_transform(T)
Reorient the object to point to a new direction from an initial one.
If `initaxis` is None, the object will be assumed in its "default" orientation. If `xyplane` is True, the object will be rotated to lie on the xy plane. Use `rotation` to first rotate the object around its `initaxis`.
def scale(self, s=None, reset=False, origin=True) -> Union[Self, np.ndarray]:
    """
    Set/get object's scaling factor.

    Arguments:
        s : (list, float)
            scaling factor(s).
        reset : (bool)
            if True previous scaling factors are ignored.
        origin : (bool)
            if True scaling is applied with respect to object's position,
            otherwise is applied respect to (0,0,0).

    Note:
        use `s=(sx,sy,sz)` to scale differently in the three coordinates.

    (Annotation fixed: the original used `np.array` — a function, not a
    type — where `np.ndarray` was meant.)
    """
    if s is None:
        # getter: current scaling factors of the accumulated transform
        return np.array(self.transform.T.GetScale())

    if not utils.is_sequence(s):
        s = [s, s, s]  # uniform scaling

    LT = LinearTransform()
    if reset:
        # divide out the scaling already accumulated in the transform
        old_s = np.array(self.transform.T.GetScale())
        LT.scale(s / old_s)
    else:
        if origin is True:
            LT.scale(s, origin=self.transform.position)
        elif origin is False:
            LT.scale(s, origin=False)
        else:
            # an explicit point was passed as the scaling origin
            LT.scale(s, origin=origin)

    return self.apply_transform(LT)
Set/get object's scaling factor.
Arguments:
- s : (list, float) scaling factor(s).
- reset : (bool) if True previous scaling factors are ignored.
- origin : (bool) if True scaling is applied with respect to object's position, otherwise is applied respect to (0,0,0).
Note:
use `s=(sx,sy,sz)` to scale differently in the three coordinates.
Inherited Members
- CommonAlgorithms
- pointdata
- celldata
- metadata
- memory_address
- memory_size
- modified
- box
- update_dataset
- bounds
- xbounds
- ybounds
- zbounds
- diagonal_size
- average_size
- center_of_mass
- copy_data_from
- inputdata
- npoints
- nvertices
- ncells
- points
- cell_centers
- lines
- lines_as_flat_array
- mark_boundaries
- find_cells_in_bounds
- find_cells_along_line
- find_cells_along_plane
- keep_cell_types
- map_cells_to_points
- vertices
- coordinates
- cells_as_flat_array
- cells
- map_points_to_cells
- resample_data_from
- interpolate_data_from
- add_ids
- gradient
- divergence
- vorticity
- probe
- compute_cell_size
- generate_random_data
- integrate_data
- write
- tomesh
- signed_distance
- unsigned_distance
- smooth_data
- compute_streamlines
class VolumeAlgorithms(CommonAlgorithms):
    """Methods for Volume objects."""

    def bounds(self) -> np.ndarray:
        """
        Get the object bounds.
        Returns a list in format `[xmin,xmax, ymin,ymax, zmin,zmax]`.
        """
        # OVERRIDE CommonAlgorithms.bounds() which is too slow
        return np.array(self.dataset.GetBounds())

    def isosurface(self, value=None, flying_edges=False) -> "vedo.mesh.Mesh":
        """
        Return a `Mesh` isosurface extracted from the `Volume` object.

        Set `value` as single float or list of values to draw the isosurface(s).
        Use flying_edges for faster results (but sometimes can interfere with `smooth()`).

        Arguments:
            value : (float, list)
                single value or list of values to draw the isosurface(s).
            flying_edges : (bool)
                use the flying-edges algorithm (faster, but may interfere with `smooth()`).

        Examples:
            - [isosurfaces1.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/isosurfaces1.py)

            ![](https://vedo.embl.es/images/volumetric/isosurfaces.png)
        """
        scrange = self.dataset.GetScalarRange()

        if flying_edges:
            cf = vtki.new("FlyingEdges3D")
            cf.InterpolateAttributesOn()
        else:
            cf = vtki.new("ContourFilter")
            cf.UseScalarTreeOn()

        cf.SetInputData(self.dataset)
        cf.ComputeNormalsOn()

        if utils.is_sequence(value):
            cf.SetNumberOfContours(len(value))
            for i, t in enumerate(value):
                cf.SetValue(i, t)
        else:
            if value is None:
                # default isovalue: one third of the way into the scalar range
                value = (2 * scrange[0] + scrange[1]) / 3.0
            cf.SetValue(0, value)

        cf.Update()
        poly = cf.GetOutput()

        out = vedo.mesh.Mesh(poly, c=None).phong()
        out.mapper.SetScalarRange(scrange[0], scrange[1])

        out.pipeline = utils.OperationNode(
            "isosurface",
            parents=[self],
            comment=f"#pts {out.dataset.GetNumberOfPoints()}",
            c="#4cc9f0:#e9c46a",
        )
        return out

    def isosurface_discrete(self, value=None, nsmooth=15) -> "vedo.mesh.Mesh":
        """
        Create boundary/isocontour surfaces from a label map (e.g., a segmented image)
        using a threaded, 3D version of the multiple objects/labels Surface Nets algorithm.
        The input is a 3D image (i.e., volume) where each voxel is labeled
        (integer labels are preferred to real values), and the output data is a
        polygonal mesh separating labeled regions / objects.
        (Note that on output each region [corresponding to a different segmented object]
        will share points/edges on a common boundary, i.e., two neighboring objects will
        share the boundary that separates them).

        Arguments:
            value : (float, list)
                single value or list of values to draw the isosurface(s).
            nsmooth : (int)
                number of iterations of smoothing (0 means no smoothing).

        Examples:
            - [isosurfaces2.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/isosurfaces2.py)
        """
        if not utils.is_sequence(value):
            value = [value]

        snets = vtki.new("SurfaceNets3D")
        snets.SetInputData(self.dataset)

        if nsmooth:
            snets.SmoothingOn()
            snets.AutomaticSmoothingConstraintsOn()
            snets.GetSmoother().SetNumberOfIterations(nsmooth)
        else:
            snets.SmoothingOff()

        # Fully configure the filter before executing it. The previous version
        # called Update() once, then set the output mesh type and style, then
        # called Update() again — running the whole pipeline twice for nothing.
        snets.SetOutputMeshTypeToTriangles()
        snets.SetOutputStyleToBoundary()
        for i, val in enumerate(value):
            snets.SetValue(i, val)
        snets.Update()

        out = vedo.mesh.Mesh(snets.GetOutput())
        out.pipeline = utils.OperationNode(
            "isosurface_discrete",
            parents=[self],
            comment=f"#pts {out.dataset.GetNumberOfPoints()}",
            c="#4cc9f0:#e9c46a",
        )
        return out

    def legosurface(
        self,
        vmin=None,
        vmax=None,
        invert=False,
        boundary=False,
        array_name="input_scalars",
    ) -> "vedo.mesh.Mesh":
        """
        Represent an object - typically a `Volume` - as lego blocks (voxels).
        By default colors correspond to the volume's scalar.
        Returns a `Mesh` object.

        Arguments:
            vmin : (float)
                the lower threshold, voxels below this value are not shown.
            vmax : (float)
                the upper threshold, voxels above this value are not shown.
            invert : (bool)
                invert the selection, keeping the voxels outside the [vmin, vmax] window.
            boundary : (bool)
                controls whether to include cells that are partially inside.
            array_name : (int, str)
                name or index of the scalar array to be considered.

        Examples:
            - [legosurface.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/legosurface.py)

            ![](https://vedo.embl.es/images/volumetric/56820682-da40e500-684c-11e9-8ea3-91cbcba24b3a.png)
        """
        imp_dataset = vtki.new("ImplicitDataSet")
        imp_dataset.SetDataSet(self.dataset)
        window = vtki.new("ImplicitWindowFunction")
        window.SetImplicitFunction(imp_dataset)

        srng = list(self.dataset.GetScalarRange())
        if vmin is not None:
            srng[0] = vmin
        if vmax is not None:
            srng[1] = vmax
        # widen the window slightly so voxels exactly at the thresholds are kept
        tol = 0.00001 * (srng[1] - srng[0])
        srng[0] -= tol
        srng[1] += tol
        window.SetWindowRange(srng)

        extract = vtki.new("ExtractGeometry")
        extract.SetInputData(self.dataset)
        extract.SetImplicitFunction(window)
        extract.SetExtractInside(invert)
        extract.SetExtractBoundaryCells(boundary)
        extract.Update()

        gf = vtki.new("GeometryFilter")
        gf.SetInputData(extract.GetOutput())
        gf.Update()

        m = vedo.mesh.Mesh(gf.GetOutput()).lw(0.1).flat()
        m.map_points_to_cells()
        m.celldata.select(array_name)

        m.pipeline = utils.OperationNode(
            "legosurface",
            parents=[self],
            comment=f"array: {array_name}",
            c="#4cc9f0:#e9c46a",
        )
        return m

    def tomesh(self, fill=True, shrink=1.0) -> "vedo.mesh.Mesh":
        """
        Build a polygonal Mesh from the current object.

        If `fill=True`, the interior faces of all the cells are created.
        (setting a `shrink` value slightly smaller than the default 1.0
        can avoid flickering due to internal adjacent faces).

        If `fill=False`, only the boundary faces will be generated.
        """
        gf = vtki.new("GeometryFilter")
        if fill:
            sf = vtki.new("ShrinkFilter")
            sf.SetInputData(self.dataset)
            sf.SetShrinkFactor(shrink)
            sf.Update()
            gf.SetInputData(sf.GetOutput())
            gf.Update()
            poly = gf.GetOutput()
            if shrink == 1.0:
                # an identity shrink leaves duplicated points: merge them back
                clean_poly = vtki.new("CleanPolyData")
                clean_poly.PointMergingOn()
                clean_poly.ConvertLinesToPointsOn()
                clean_poly.ConvertPolysToLinesOn()
                clean_poly.ConvertStripsToPolysOn()
                clean_poly.SetInputData(poly)
                clean_poly.Update()
                poly = clean_poly.GetOutput()
        else:
            gf.SetInputData(self.dataset)
            gf.Update()
            poly = gf.GetOutput()

        msh = vedo.mesh.Mesh(poly).flat()
        msh.scalarbar = self.scalarbar
        lut = utils.ctf2lut(self)
        if lut:
            msh.mapper.SetLookupTable(lut)

        msh.pipeline = utils.OperationNode(
            "tomesh", parents=[self], comment=f"fill={fill}", c="#9e2a2b:#e9c46a"
        )
        return msh
Methods for Volume objects.
def bounds(self) -> np.ndarray:
    """
    Get the object bounds.
    Returns a list in format `[xmin,xmax, ymin,ymax, zmin,zmax]`.
    """
    # Bypass CommonAlgorithms.bounds(), which is too slow for volumes.
    bnds = self.dataset.GetBounds()
    return np.array(bnds)
Get the object bounds.
Returns a list in format `[xmin,xmax, ymin,ymax, zmin,zmax]`.
def isosurface(self, value=None, flying_edges=False) -> "vedo.mesh.Mesh":
    """
    Return a `Mesh` isosurface extracted from the `Volume` object.

    Set `value` as single float or list of values to draw the isosurface(s).
    Use flying_edges for faster results (but sometimes can interfere with `smooth()`).

    Examples:
        - [isosurfaces1.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/isosurfaces1.py)

        ![](https://vedo.embl.es/images/volumetric/isosurfaces.png)
    """
    srange = self.dataset.GetScalarRange()

    # pick the contouring backend
    if flying_edges:
        contour = vtki.new("FlyingEdges3D")
        contour.InterpolateAttributesOn()
    else:
        contour = vtki.new("ContourFilter")
        contour.UseScalarTreeOn()

    contour.SetInputData(self.dataset)
    contour.ComputeNormalsOn()

    if not utils.is_sequence(value):
        if value is None:
            # default isovalue: one third of the way into the scalar range
            value = (2 * srange[0] + srange[1]) / 3.0
        contour.SetValue(0, value)
    else:
        contour.SetNumberOfContours(len(value))
        for idx, isoval in enumerate(value):
            contour.SetValue(idx, isoval)

    contour.Update()

    out = vedo.mesh.Mesh(contour.GetOutput(), c=None).phong()
    out.mapper.SetScalarRange(srange[0], srange[1])

    out.pipeline = utils.OperationNode(
        "isosurface",
        parents=[self],
        comment=f"#pts {out.dataset.GetNumberOfPoints()}",
        c="#4cc9f0:#e9c46a",
    )
    return out
Return a `Mesh` isosurface extracted from the `Volume` object.
Set `value` as single float or list of values to draw the isosurface(s).
Use flying_edges for faster results (but sometimes can interfere with `smooth()`).
Examples:
def isosurface_discrete(self, value=None, nsmooth=15) -> "vedo.mesh.Mesh":
    """
    Create boundary/isocontour surfaces from a label map (e.g., a segmented image)
    using a threaded, 3D version of the multiple objects/labels Surface Nets algorithm.
    The input is a 3D image (i.e., volume) where each voxel is labeled
    (integer labels are preferred to real values), and the output data is a
    polygonal mesh separating labeled regions / objects.
    (Note that on output each region [corresponding to a different segmented object]
    will share points/edges on a common boundary, i.e., two neighboring objects will
    share the boundary that separates them).

    Arguments:
        value : (float, list)
            single value or list of values to draw the isosurface(s).
        nsmooth : (int)
            number of iterations of smoothing (0 means no smoothing).

    Examples:
        - [isosurfaces2.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/isosurfaces2.py)
    """
    if not utils.is_sequence(value):
        value = [value]

    sn = vtki.new("SurfaceNets3D")
    sn.SetInputData(self.dataset)

    if not nsmooth:
        sn.SmoothingOff()
    else:
        sn.SmoothingOn()
        sn.AutomaticSmoothingConstraintsOn()
        sn.GetSmoother().SetNumberOfIterations(nsmooth)

    for contour_id, label in enumerate(value):
        sn.SetValue(contour_id, label)
    sn.Update()
    sn.SetOutputMeshTypeToTriangles()
    sn.SetOutputStyleToBoundary()
    sn.Update()

    out = vedo.mesh.Mesh(sn.GetOutput())
    out.pipeline = utils.OperationNode(
        "isosurface_discrete",
        parents=[self],
        comment=f"#pts {out.dataset.GetNumberOfPoints()}",
        c="#4cc9f0:#e9c46a",
    )
    return out
Create boundary/isocontour surfaces from a label map (e.g., a segmented image) using a threaded, 3D version of the multiple objects/labels Surface Nets algorithm. The input is a 3D image (i.e., volume) where each voxel is labeled (integer labels are preferred to real values), and the output data is a polygonal mesh separating labeled regions / objects. (Note that on output each region [corresponding to a different segmented object] will share points/edges on a common boundary, i.e., two neighboring objects will share the boundary that separates them).
Arguments:
- value : (float, list) single value or list of values to draw the isosurface(s).
- nsmooth : (int) number of iterations of smoothing (0 means no smoothing).
Examples:
def legosurface(
    self,
    vmin=None,
    vmax=None,
    invert=False,
    boundary=False,
    array_name="input_scalars",
) -> "vedo.mesh.Mesh":
    """
    Represent an object - typically a `Volume` - as lego blocks (voxels).
    By default colors correspond to the volume's scalar.
    Returns a `Mesh` object.

    Arguments:
        vmin : (float)
            the lower threshold, voxels below this value are not shown.
        vmax : (float)
            the upper threshold, voxels above this value are not shown.
        boundary : (bool)
            controls whether to include cells that are partially inside
        array_name : (int, str)
            name or index of the scalar array to be considered

    Examples:
        - [legosurface.py](https://github.com/marcomusy/vedo/tree/master/examples/volumetric/legosurface.py)

        ![](https://vedo.embl.es/images/volumetric/56820682-da40e500-684c-11e9-8ea3-91cbcba24b3a.png)
    """
    # implicit function whose value is the volume's own scalar field
    implicit_vol = vtki.new("ImplicitDataSet")
    implicit_vol.SetDataSet(self.dataset)
    window = vtki.new("ImplicitWindowFunction")
    window.SetImplicitFunction(implicit_vol)

    vrange = list(self.dataset.GetScalarRange())
    if vmin is not None:
        vrange[0] = vmin
    if vmax is not None:
        vrange[1] = vmax
    # widen the window slightly so voxels exactly at the thresholds are kept
    eps = 0.00001 * (vrange[1] - vrange[0])
    vrange[0] -= eps
    vrange[1] += eps
    window.SetWindowRange(vrange)

    extract = vtki.new("ExtractGeometry")
    extract.SetInputData(self.dataset)
    extract.SetImplicitFunction(window)
    extract.SetExtractInside(invert)
    extract.SetExtractBoundaryCells(boundary)
    extract.Update()

    geo = vtki.new("GeometryFilter")
    geo.SetInputData(extract.GetOutput())
    geo.Update()

    lego = vedo.mesh.Mesh(geo.GetOutput()).lw(0.1).flat()
    lego.map_points_to_cells()
    lego.celldata.select(array_name)

    lego.pipeline = utils.OperationNode(
        "legosurface",
        parents=[self],
        comment=f"array: {array_name}",
        c="#4cc9f0:#e9c46a",
    )
    return lego
Represent an object - typically a `Volume` - as lego blocks (voxels).
By default colors correspond to the volume's scalar. Returns a `Mesh` object.
Arguments:
- vmin : (float) the lower threshold, voxels below this value are not shown.
- vmax : (float) the upper threshold, voxels above this value are not shown.
- boundary : (bool) controls whether to include cells that are partially inside
- array_name : (int, str) name or index of the scalar array to be considered
Examples:
def tomesh(self, fill=True, shrink=1.0) -> "vedo.mesh.Mesh":
    """
    Build a polygonal Mesh from the current object.

    If `fill=True`, the interior faces of all the cells are created.
    (setting a `shrink` value slightly smaller than the default 1.0
    can avoid flickering due to internal adjacent faces).

    If `fill=False`, only the boundary faces will be generated.
    """
    geo = vtki.new("GeometryFilter")
    if not fill:
        # boundary faces only
        geo.SetInputData(self.dataset)
        geo.Update()
        poly = geo.GetOutput()
    else:
        # create interior faces too, shrinking each cell slightly if requested
        shrinker = vtki.new("ShrinkFilter")
        shrinker.SetInputData(self.dataset)
        shrinker.SetShrinkFactor(shrink)
        shrinker.Update()
        geo.SetInputData(shrinker.GetOutput())
        geo.Update()
        poly = geo.GetOutput()
        if shrink == 1.0:
            # an identity shrink leaves duplicated points: merge them back
            cleaner = vtki.new("CleanPolyData")
            cleaner.PointMergingOn()
            cleaner.ConvertLinesToPointsOn()
            cleaner.ConvertPolysToLinesOn()
            cleaner.ConvertStripsToPolysOn()
            cleaner.SetInputData(poly)
            cleaner.Update()
            poly = cleaner.GetOutput()

    msh = vedo.mesh.Mesh(poly).flat()
    msh.scalarbar = self.scalarbar
    lut = utils.ctf2lut(self)
    if lut:
        msh.mapper.SetLookupTable(lut)

    msh.pipeline = utils.OperationNode(
        "tomesh", parents=[self], comment=f"fill={fill}", c="#9e2a2b:#e9c46a"
    )
    return msh
Build a polygonal Mesh from the current object.
If `fill=True`, the interior faces of all the cells are created
(setting a `shrink` value slightly smaller than the default 1.0
can avoid flickering due to internal adjacent faces).
If `fill=False`, only the boundary faces will be generated.
Inherited Members
- CommonAlgorithms
- pointdata
- celldata
- metadata
- memory_address
- memory_size
- modified
- box
- update_dataset
- xbounds
- ybounds
- zbounds
- diagonal_size
- average_size
- center_of_mass
- copy_data_from
- inputdata
- npoints
- nvertices
- ncells
- points
- cell_centers
- lines
- lines_as_flat_array
- mark_boundaries
- find_cells_in_bounds
- find_cells_along_line
- find_cells_along_plane
- keep_cell_types
- map_cells_to_points
- vertices
- coordinates
- cells_as_flat_array
- cells
- map_points_to_cells
- resample_data_from
- interpolate_data_from
- add_ids
- gradient
- divergence
- vorticity
- probe
- compute_cell_size
- generate_random_data
- integrate_data
- write
- signed_distance
- unsigned_distance
- smooth_data
- compute_streamlines