#!/usr/bin/env python # coding: utf-8 # # The fastai Image classes # In[1]: from fastai.gen_doc.nbdoc import * from fastai.vision import * from fastai import * # The fastai library is built such that the pictures loaded are wrapped in an [`Image`](/vision.image.html#Image). This [`Image`](/vision.image.html#Image) contains the array of pixels associated with the picture, but also has many built-in functions that help the fastai library process the transformations applied to it. There are also sub-classes for special types of image-like objects: # # - [`ImageSegment`](/vision.image.html#ImageSegment) for segmentation masks # - [`ImageBBox`](/vision.image.html#ImageBBox) for bounding boxes # # See the following sections for documentation of all the details of these classes. But first, let's have a quick look at the main functionality you'll need to know about. # # Opening an image and converting it to an [`Image`](/vision.image.html#Image) object is easily done by using the [`open_image`](/vision.image.html#open_image) function: # In[ ]: img = open_image('imgs/cat_example.jpg') img # To look at the picture that this [`Image`](/vision.image.html#Image) contains, you can also use its `show` method. It will show a resized version and has more options to customize the display. # In[ ]: img.show() # This `show` method can take a few arguments (see the documentation of [`show_image`](/vision.image.html#show_image) for details) but the two we will use the most in this documentation are: # - `ax` which is the [matplotlib.pyplot axes](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.axes.html) on which we want to show the image # - `title` which is an optional title we can give to the image. # In[ ]: _,axs = plt.subplots(1,4,figsize=(12,4)) for i,ax in enumerate(axs): img.show(ax=ax, title=f'Copy {i+1}') # If you're interested in the tensor of pixels, it's stored in the `data` attribute of an [`Image`](/vision.image.html#Image). # In[ ]: img.data.shape # ## The Image classes # [`Image`](/vision.image.html#Image) is the class that wraps every picture in the fastai library. It is subclassed to create [`ImageSegment`](/vision.image.html#ImageSegment) and [`ImageBBox`](/vision.image.html#ImageBBox) when dealing with segmentation and object detection tasks. # In[2]: show_doc(Image, title_level=3) # Most of the functions of the [`Image`](/vision.image.html#Image) class deal with the internal pipeline of transforms, so they are only shown at the end of this page. The easiest way to create one is through the function [`open_image`](/vision.image.html#open_image). # In[3]: show_doc(open_image) # In[ ]: img = open_image('imgs/cat_example.jpg') img # In a Jupyter Notebook, the representation of an [`Image`](/vision.image.html#Image) is its underlying picture (shown at its full size). On top of containing the tensor of pixels of the image (and automatically doing the conversion after decoding the file), this class contains various methods for the implementation of transforms.
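# Under the hood, [`open_image`](/vision.image.html#open_image) roughly amounts to decoding the file with PIL, converting it to a float tensor scaled to [0,1] with [`pil2tensor`](/vision.image.html#pil2tensor) (documented further down this page), and wrapping the result in an [`Image`](/vision.image.html#Image). Here is a simplified sketch of that equivalence; it ignores the extra options of [`open_image`](/vision.image.html#open_image).

# In[ ]:

pil_img = PIL.Image.open('imgs/cat_example.jpg').convert('RGB')  # decode the file with PIL
img_manual = Image(pil2tensor(pil_img, np.float32).div_(255))    # float tensor in [0,1], wrapped in an Image
img_manual.show(figsize=(4,3), title='Built with pil2tensor')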
The [`Image.show`](/vision.image.html#Image.show) method also accepts more arguments: # In[4]: show_doc(Image.show, arg_comments ={ 'ax': 'matplotlib.pyplot axes on which to show the image', 'figsize': 'Size of the figure', 'title': 'Title to display on top of the graph', 'hide_axis': 'If True, the axes of the graph are hidden', 'cmap': 'Color map to use', 'y': 'Potential target to be superimposed on the same graph (mask, bounding box, points)' }, full_name='Image.show') # This allows us to completely customize the display of an [`Image`](/vision.image.html#Image). We'll see examples of the `y` functionality below with segmentation and bounding box tasks; for now, here is an example using the other features. # In[ ]: img.show(figsize=(2, 1), title='Little kitten') # In[ ]: img.show(figsize=(10,5), title='Big kitten') # An [`Image`](/vision.image.html#Image) object also has a few attributes that can be useful: # - `Image.data` gives you the underlying tensor of pixels # - `Image.shape` gives you the size of that tensor (channels x height x width) # - `Image.size` gives you the size of the image (height x width) # In[ ]: img.data, img.shape, img.size # For a segmentation task, the target is usually a mask. The fastai library represents it as an [`ImageSegment`](/vision.image.html#ImageSegment) object. # In[5]: show_doc(ImageSegment, title_level=3) # To easily open a mask, the function [`open_mask`](/vision.image.html#open_mask) plays the same role as [`open_image`](/vision.image.html#open_image): # In[6]: show_doc(open_mask) # In[ ]: open_mask('imgs/mask_example.png') # From time to time, you may encounter mask data as a run-length encoded string instead of a picture. # In[ ]: df = pd.read_csv('imgs/mask_rle_sample.csv') encoded_str = df.iloc[1]['rle_mask']; df[:2] # You can also read a mask in run-length encoding, with an extra argument `shape` for the image size: # In[ ]: mask = open_mask_rle(df.iloc[0]['rle_mask'], shape=(1918, 1280)).resize((1,128,128)) mask # In[7]: show_doc(open_mask_rle) # [`open_mask_rle`](/vision.image.html#open_mask_rle) simply makes use of the helper function [`rle_decode`](/vision.image.html#rle_decode): # In[ ]: rle_decode(encoded_str, (1918, 1280)).shape # In[8]: show_doc(rle_decode) # You can also convert an [`ImageSegment`](/vision.image.html#ImageSegment) to run-length encoding. # In[ ]: type(mask) # In[ ]: rle_encode(mask.data) # In[9]: show_doc(rle_encode) # An [`ImageSegment`](/vision.image.html#ImageSegment) object has the same properties as an [`Image`](/vision.image.html#Image). The only difference is that when applying the transformations to an [`ImageSegment`](/vision.image.html#ImageSegment), it will ignore the functions that deal with lighting and keep values of 0 and 1. As explained earlier, it's easy to show the segmentation mask over the associated [`Image`](/vision.image.html#Image) by using the `y` argument of [`show_image`](/vision.image.html#show_image). # In[ ]: img = open_image('imgs/car_example.jpg') mask = open_mask('imgs/mask_example.png') _,axs = plt.subplots(1,3, figsize=(8,4)) img.show(ax=axs[0], title='no mask') img.show(ax=axs[1], y=mask, title='masked') mask.show(ax=axs[2], title='mask only', alpha=1.) # When the targets are a bunch of points, the following class will help. # In[10]: show_doc(ImagePoints, doc_string=False, title_level=3) # Create an [`ImagePoints`](/vision.image.html#ImagePoints) object from a `flow` of coordinates.
Coordinates need to be scaled to the range (-1,1), which will be done in the initialization if `scale` is left as `True`. The convention is to have point coordinates in the form `[y,x]` unless `y_first` is set to `False`. # In[ ]: img = open_image('imgs/face_example.jpg') pnts = torch.load('points.pth') pnts = ImagePoints(FlowField(img.size, pnts)) img.show(y=pnts) # Note that the raw points are gathered in a [`FlowField`](/vision.image.html#FlowField) object, which is a class that wraps together a bunch of coordinates with the corresponding image size. In fastai, we expect points to have the y coordinate first by default. The underlying data of `pnts` is the flow of points scaled from -1 to 1 (again with the y coordinate first): # In[ ]: pnts.data[:10] # For an object detection task, the target is a bounding box containing the object of interest in the picture. # In[11]: show_doc(ImageBBox, doc_string=False, title_level=3) # Create an [`ImageBBox`](/vision.image.html#ImageBBox) object from a `flow` of coordinates. Those coordinates are expected to be in a [`FlowField`](/vision.image.html#FlowField) with an underlying flow of size 4N (if we have N bboxes), describing for each box its top-left, top-right, bottom-left and bottom-right corners. Coordinates need to be scaled to the range (-1,1), which will be done in the initialization if `scale` is left as `True`. The convention is to have point coordinates in the form `[y,x]` unless `y_first` is set to `False`. `labels` is an optional collection of labels, which should be the same size as `flow`. `pad_idx` is used if the set of transforms somehow leaves the image without any bounding boxes. # To create an [`ImageBBox`](/vision.image.html#ImageBBox), you can use the `create` helper function that takes a list of bounding boxes, the height of the input image, and the width of the input image. Each bounding box is represented by a list of four numbers: the coordinates of its corners, with the convention top, left, bottom, right. # In[12]: show_doc(ImageBBox.create, arg_comments={ 'bboxes': 'list of bboxes (each of those being four integers with the top, left, bottom, right convention)', 'h': 'height of the input image', 'w': 'width of the input image', 'labels': 'labels of the bounding boxes', 'pad_idx': 'padding index that will be used to group the ImageBBox in a batch' }) # We need to pass the dimensions of the input image so that [`ImageBBox`](/vision.image.html#ImageBBox) can internally create the [`FlowField`](/vision.image.html#FlowField). Again, the [`Image.show`](/vision.image.html#Image.show) method will display the bounding box on the same image if it's passed as a `y` argument. # In[ ]: img = open_image('imgs/car_bbox.jpg') bbox = ImageBBox.create([[96, 155, 270, 351]], *img.size) img.show(y=bbox) # We can add labels to the object. They must be category codes (so from 0 to the number of classes - 1) but when using the [`Image.show`](/vision.image.html#Image.show) method, we can specify a list of classes.
# In[ ]: img = open_image('imgs/car_bbox.jpg') bbox = ImageBBox.create([[96, 155, 270, 351]], *img.size, tensor([0])) img.show(y=bbox, classes=['car'], figsize=(4,3)) # To help with the conversion of images or to show them, we use these helper functions: # In[13]: show_doc(show_image) # In[14]: show_doc(pil2tensor) # In[36]: pil2tensor(PIL.Image.open('imgs/cat_example.jpg').convert("RGB"), np.float32).div_(255).size() # In[39]: pil2tensor(PIL.Image.open('imgs/cat_example.jpg').convert("I"), np.float32).div_(255).size() # In[40]: pil2tensor(PIL.Image.open('imgs/mask_example.png').convert("L"), np.float32).div_(255).size() # In[41]: pil2tensor(np.random.rand(224,224,3).astype(np.float32), np.float32).size() # In[42]: pil2tensor(PIL.Image.open('imgs/cat_example.jpg'), np.float32).div_(255).size() # In[44]: pil2tensor(PIL.Image.open('imgs/mask_example.png'), np.float32).div_(255).size() # In[15]: show_doc(image2np) # In[16]: show_doc(scale_flow) # In[17]: show_doc(bb2hw) # ## Applying transforms # All the transforms available for data augmentation in computer vision are defined in the [vision.transform](vision.transform.ipynb) module. When we want to apply them to an [`Image`](/vision.image.html#Image), we use this function: # In[18]: show_doc(apply_tfms, arg_comments={ 'tfms': '`Transform` or list of `Transform`', 'x': '`Image` to apply the `tfms` to', 'do_resolve': 'if False, the values of random parameters are kept from the last draw', 'xtra': 'extra arguments to pass to the transforms', 'size': 'desired target size', 'mult': 'makes sure the final size is a multiple of mult', 'resize_method': 'how to get to the final size (crop, pad, squish)', 'padding_mode': "how to pad the image ('zeros', 'border', 'reflection')" }) # Before showing examples, let's take a few moments to comment on those arguments a bit more: # - `do_resolve` decides if we resolve the random arguments by drawing new numbers or not. The intended use is to have the `tfms` applied to the input `x` with `do_resolve`=True, then, if data augmentation also needs to be applied to the target `y` (if it's a segmentation mask or bounding box), apply the `tfms` to `y` with `do_resolve`=False. # - The default value of `mult` is very important to make sure your image can pass through most recent CNNs: they divide the size of the input image by 2 multiple times, so both dimensions of your picture should be multiples of at least 32. Only change the value of this parameter if you know it will be accepted by your model. # Here are a few helper functions to help us load the examples we saw before. # In[ ]: def get_class_ex(): return open_image('imgs/cat_example.jpg') def get_seg_ex(): return open_image('imgs/car_example.jpg'), open_mask('imgs/mask_example.png') def get_pnt_ex(): img = open_image('imgs/face_example.jpg') pnts = torch.load('points.pth') return img, ImagePoints(FlowField(img.size, pnts)) def get_bb_ex(): img = open_image('imgs/car_bbox.jpg') return img, ImageBBox.create([[96, 155, 270, 351]], *img.size) # Now let's grab our usual bunch of transforms and see what they do. # In[ ]: tfms = get_transforms() _, axs = plt.subplots(2,4,figsize=(12,6)) for ax in axs.flatten(): img = apply_tfms(tfms[0], get_class_ex(), size=224) img.show(ax=ax) # Now let's check what it gives for a segmentation task. Note that, as instructed by the documentation of [`apply_tfms`](/vision.image.html#apply_tfms), we first apply the transforms to the input, and then apply them to the target while adding `do_resolve`=False.
# In[ ]: tfms = get_transforms() _, axs = plt.subplots(2,4,figsize=(12,6)) for ax in axs.flatten(): img,mask = get_seg_ex() img = apply_tfms(tfms[0], img, size=224) mask = apply_tfms(tfms[0], mask, do_resolve=False, size=224) img.show(ax=ax, y=mask) # Internally, each *transform* saves the values it randomly picked into a dictionary called *resolved*, which it can reuse for the target. # In[ ]: tfms[0][4] # For points, [`ImagePoints`](/vision.image.html#ImagePoints) will apply the transforms to the coordinates. # In[ ]: tfms = get_transforms() _, axs = plt.subplots(2,4,figsize=(12,6)) for ax in axs.flatten(): img,pnts = get_pnt_ex() img = apply_tfms(tfms[0], img, size=224) pnts = apply_tfms(tfms[0], pnts, do_resolve=False, size=224) img.show(ax=ax, y=pnts) # Now for the bounding box, the [`ImageBBox`](/vision.image.html#ImageBBox) will automatically update the coordinates of the two opposite corners in its data attribute. # In[ ]: tfms = get_transforms() _, axs = plt.subplots(2,4,figsize=(12,6)) for ax in axs.flatten(): img,bbox = get_bb_ex() img = apply_tfms(tfms[0], img, size=224) bbox = apply_tfms(tfms[0], bbox, do_resolve=False, size=224) img.show(ax=ax, y=bbox) # ## Randomness # As explained in the [transform module](http://docs.fast.ai/vision.transform.html#Randomness), to indicate to a [`Transform`](/vision.image.html#Transform) how to randomize an argument, we use a type annotation with a random function. Here is the list of the available functions. # In[19]: show_doc(rand_bool) # In[ ]: rand_bool(0.5, 8) # In[20]: show_doc(uniform) # In[ ]: uniform(0,1,(8,)) # In[21]: show_doc(uniform_int) # In[ ]: uniform_int(0,2,(8,)) # In[22]: show_doc(log_uniform, doc_string=False) # Picks a random number (or a tensor of size `size`) between log(`low`) and log(`high`), then returns its exponential (so that it's between `low` and `high` in the end). # In[ ]: log_uniform(0.5,2,(8,)) # ## Fastai internal pipeline # ### What does a transform do? # Typically, a data augmentation operation will randomly modify an image input. This operation can apply to pixels (when we modify the contrast or brightness for instance) or to coordinates (when we do a rotation, a zoom or a resize). The operations that apply to pixels can easily be coded in numpy/pytorch, directly on an array/tensor, but the ones that modify the coordinates are a bit more tricky. # # They usually come in three steps: first we create a grid of coordinates for our picture. This is an array of size `h * w * 2` (`h` for height, `w` for width in the rest of this section) that contains in position i,j two floats representing the position of the pixel (i,j) in the picture. They could simply be the integers i and j, but since most transformations are centered with the center of the picture as origin, they’re usually rescaled to go from -1 to 1, (-1,-1) being the top left corner of the picture, (1,1) the bottom right corner (and (0,0) the center), and this can be seen as a regular grid of size h * w. Here is what our grid would look like for a 5px by 5px image. # # Example of grid # # Then, we apply the transformation to modify this grid of coordinates. For instance, if we want to apply an affine transformation (like a rotation), we will transform each of those vectors `x` of size 2 by `A @ x + b` at every position in the grid. This will give us the new coordinates, as seen here in the case of our previous grid.
# # Example of grid rotated # # There are two problems that arise after the transformation: the first one is that the pixel values won’t fall exactly on the grid, and the other is that we can get values that fall out of the grid (one of the coordinates is greater than 1 or lower than -1). # # To solve the first problem, we use an interpolation. If we forget the rescale for a minute and go back to coordinates being integers, the result of our transformation gives us float coordinates, and we need to decide, for each (i,j), which pixel value in the original picture we need to take. The most basic interpolation, called nearest neighbor, would just round the floats and take the nearest integers. If we think in terms of the grid of coordinates (going from -1 to 1), the result of our transformation gives a point that isn’t in the grid, and we replace it by its nearest neighbor in the grid. # # To be smarter, we can perform a [bilinear interpolation](https://en.wikipedia.org/wiki/Bilinear_interpolation). This takes an average of the values of the pixels corresponding to the four points in the grid surrounding the result of our transformation, with weights depending on how close we are to each of those points. This comes at a computational cost though, so this is where we have to be careful. # # As for the values that fall out of the picture, we treat them by padding the image, either: # - by adding zeros on the side, so the pixels that fall out will be black (zero padding) # - by replacing them by the value at the border (border padding) # - by mirroring the content of the picture on the other side (reflection padding). # ### Be smart and efficient # Usually, data augmentation libraries keep the different operations separate. So for a resize, we’ll go through the three steps above; then if we do a random rotation, we’ll go through those steps again; then for a zoom, etc. The fastai library works differently: it does all the transformations on the coordinates at the same time, so that we only go through those three steps once, which matters especially for the last one (the interpolation), since it is the heaviest in computation. # # The first thing is that we can regroup all affine transforms in just one pass (because an affine transform composed with an affine transform is another affine transform). This is already done in some other libraries but we pushed it one step further. We integrated the resize, the crop and any non-affine transformation of the coordinates in the same process. Let’s dig in! # # - In step 1, when we create the grid, we use the new size we want for our image, so `new_h, new_w` (and not `h, w`). This takes care of the resize operation. # - In step 2, we do only one affine transformation, by multiplying all the affine matrices of the transforms we want to do beforehand (those are 3 by 3 matrices, so it’s super fast). Then we apply to the coordinates any non-affine transformation we might want (jitter, perspective warping, etc.) before... # - Step 2.5: we crop (either centrally or randomly) the coordinates we want to keep. Cropping could have been done at any point, but by doing it just before the interpolation, we don’t compute pixel values that won’t be used at the end, gaining a bit more efficiency. # - Finally, step 3: the final interpolation. Afterward, we can apply to the picture all the transforms that operate pixel-wise (brightness or contrast for instance) and we’re done with data augmentation.
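# To make those steps concrete, here is a minimal sketch in plain PyTorch (not the fastai internals, which are listed below): we compose a rotation and a zoom into a single 3 by 3 affine matrix, build the grid of coordinates once, and call the costly interpolation step only once. The helper names `rotation_matrix` and `zoom_matrix` are just for illustration.

# In[ ]:

import math
import torch.nn.functional as F

def rotation_matrix(theta):
    "3x3 affine matrix for a rotation of `theta` radians around the center."
    return torch.tensor([[math.cos(theta), -math.sin(theta), 0.],
                         [math.sin(theta),  math.cos(theta), 0.],
                         [0.,               0.,              1.]])

def zoom_matrix(scale):
    "3x3 affine matrix for a central zoom of factor `scale`."
    return torch.tensor([[1/scale, 0.,      0.],
                         [0.,      1/scale, 0.],
                         [0.,      0.,      1.]])

x = open_image('imgs/cat_example.jpg').data[None]         # a batch of one image: 1 x c x h x w
m = (rotation_matrix(math.pi/6) @ zoom_matrix(1.2))[:2]   # compose the two affine transforms, keep the 2x3 part
grid = F.affine_grid(m[None], x.shape)                    # steps 1-2: grid of coordinates in [-1,1], already transformed
out = F.grid_sample(x, grid, padding_mode='reflection')   # step 3: a single bilinear interpolation
Image(out[0]).show(figsize=(4,4), title='rotation + zoom, one interpolation')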
# # Note that the transforms operating on pixels are applied in two phases: # - first, the transforms that deal with lighting properties are applied to the logits of the pixels. We group them together so we only need to do the *conversion pixels -> logits -> pixels* once. # - then we apply the transforms that modify the pixels. # # This is why every transform has a type (such as [`TfmAffine`](/vision.image.html#TfmAffine), [`TfmCoord`](/vision.image.html#TfmCoord), [`TfmCrop`](/vision.image.html#TfmCrop) or [`TfmPixel`](/vision.image.html#TfmPixel)), so that the fastai library can regroup them and apply them all together at the right step. In terms of implementation: # # - [`_affine_grid`](https://github.com/fastai/fastai/blob/master/fastai/vision/image.py#L469) is responsible for creating the grid of coordinates # - [`_affine_mult`](https://github.com/fastai/fastai/blob/master/fastai/vision/image.py#L479) is in charge of doing the affine multiplication on that grid # - [`_grid_sample`](https://github.com/fastai/fastai/blob/master/fastai/vision/image.py#L464) is the function that is responsible for the interpolation step # ### Final result # TODO: add a comparison of speeds. # # Adding a new transformation doesn't impact performance much, since the costly steps are done only once. In libraries with classic data augmentation implementations, by contrast, each additional transform usually results in a longer training time. # # Doing only one interpolation also gives a better final result. If we stack several transforms and do an interpolation on each one, we only approximate the true value of our coordinates at each step. This tends to blur the image a bit, which often negatively affects performance. By regrouping all the transformations together and only doing this step at the end, the image is often less blurry and the model often performs better. # # See how the same rotation then zoom done separately (so there are two interpolations): # # Image interpolated twice # # is blurrier than regrouping the transforms and doing just one interpolation: # # Image interpolated once # In[23]: show_doc(ResizeMethod, doc_string=False) # Resize methods to transform an image to a given size: # - crop: resize so that the image fits in the desired canvas on its smaller side and crop # - pad: resize so that the image fits in the desired canvas on its bigger side and pad # - squish: resize the image by squishing it in the desired canvas # - no: doesn't resize the image # ## Transform classes # The basic class that defines a transformation in the fastai library is [`Transform`](/vision.image.html#Transform). # In[24]: show_doc(Transform, title_level=3, alt_doc_string="Create a `Transform` for `func` and assign it a priority `order`.") # In[25]: show_doc(RandTransform, title_level=3, doc_string=False) # Create a [`Transform`](/vision.image.html#Transform) from `func` that can be randomized. Each argument of `func` in kwargs is analyzed, and if it has a type annotation that is a random function, this function will be called to pick a value for it. This value will be stored in the `resolved` dictionary. Following the same idea, `p` is the probability for `func` to be called, and `do_run` will be set to True if that was the case, False otherwise. Lastly, setting `is_random` to False allows you to send specific values for each parameter.
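# As an illustration of `p` and `is_random`, here is a small sketch. It assumes the standard `rotate` transform of the [vision.transform](vision.transform.ipynb) module (a [`TfmAffine`](/vision.image.html#TfmAffine)): called with a range, its `degrees` argument is drawn anew every time the transform is resolved; called with `is_random`=False, the exact value we pass is used.

# In[ ]:

tfm_random = rotate(degrees=(-30,30), p=1.)        # `degrees` drawn uniformly in [-30,30] at each resolution
tfm_fixed  = rotate(degrees=30., is_random=False)  # always rotate by exactly 30 degrees
_,axs = plt.subplots(1,4, figsize=(12,3))
for ax in axs[:2]: apply_tfms([tfm_random], get_class_ex(), size=224).show(ax=ax, title='random')
for ax in axs[2:]: apply_tfms([tfm_fixed], get_class_ex(), size=224).show(ax=ax, title='fixed 30 deg')

# After a call to [`apply_tfms`](/vision.image.html#apply_tfms), the values that were drawn can be inspected in the `resolved` dictionary of the transform, just as we did with `tfms[0][4]` earlier.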
# In[26]: show_doc(RandTransform.resolve) # To handle data augmentation internally as explained earlier, each [`Transform`](/vision.image.html#Transform) has a type, so that the fastai library can regroup them together efficiently. There are five types of [`Transform`](/vision.image.html#Transform), which all work as decorators for a deterministic function. # In[27]: show_doc(TfmAffine, title_level=3, doc_string=False) # Decorate `func` to make it an affine transform; `func` should return the 3 by 3 matrix representing the transform. The default `order` is 5 for such transforms. # In[28]: show_doc(TfmCoord, title_level=3, doc_string=False) # Decorate `func` to make it a coord transform; `func` should take two mandatory arguments, `c` (the flow of coordinates) and `img_size` (the size of the corresponding image), and return the modified flow of coordinates. The default `order` is 4 for such transforms. # In[29]: show_doc(TfmLighting, title_level=3, doc_string=False) # Decorate `func` to make it a lighting transform; `func` takes the logits of the pixel tensor and changes them. The default `order` is 8 for such transforms. # In[30]: show_doc(TfmPixel, title_level=3, doc_string=False) # Decorate `func` to make it a pixel transform; `func` takes the pixel tensor and modifies it. The default `order` is 10 for such transforms. # In[31]: show_doc(TfmCrop, title_level=3, doc_string=False) # Decorate `func` to make it a crop transform; this is a special case of [`TfmPixel`](/vision.image.html#TfmPixel) with `order` set to 99. # To help with the conversion to logits for [`TfmLighting`](/vision.image.html#TfmLighting), we use these helper functions: # In[32]: show_doc(logit) # Take the element-wise logit of `x`. The logit is the inverse of the sigmoid function, defined by log(x/(1-x)). # In[33]: show_doc(logit_) # In-place version of [`logit`](/vision.image.html#logit). # ## Internal functions of the Image classes # All the [`Image`](/vision.image.html#Image) classes have the same internal functions that deal with data augmentation. # In[34]: show_doc(Image.affine, doc_string=False) # Apply the affine transform given by `func` to the object. # In[35]: show_doc(Image.clone) # In[36]: show_doc(Image.coord, doc_string=False) # Apply the coord transform given by `func` to the object. # In[37]: show_doc(Image.lighting, doc_string=False) # Apply the lighting transform given by `func` to the object. # In[38]: show_doc(Image.pixel, doc_string=False) # Apply the pixel transform given by `func` to the object. # In[39]: show_doc(Image.refresh) # In[40]: show_doc(Image.resize) # In[41]: show_doc(Image.save) # In[42]: show_doc(Image.show, full_name='show') # Send the [`Image`](/vision.image.html#Image) to [`show_image`](/vision.image.html#show_image) with `ax`, `y` and the `kwargs`.
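# To tie the five decorators to the randomness mechanism described above, here is a sketch of a hypothetical pixel transform (it is not part of the fastai library): the `size:uniform_int` annotation tells the resolution machinery to draw `size` from the range we pass when creating the transform.

# In[ ]:

def _blackout_corner(x, size:uniform_int):
    "Zero out a square of side `size` pixels in the top-left corner of the pixel tensor `x`."
    x[:, :size, :size] = 0.
    return x

blackout_corner = TfmPixel(_blackout_corner)

tfm = blackout_corner(size=(20,60), p=0.75)  # side drawn between 20 and 60, applied 75% of the time
_,axs = plt.subplots(1,4, figsize=(12,3))
for ax in axs: apply_tfms([tfm], get_class_ex(), size=224).show(ax=ax)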
# In[43]: show_doc(FlowField, title_level=3) # ## Undocumented Methods - Methods moved below this line will intentionally be hidden # In[44]: show_doc(Image.crop_pad) # In[45]: show_doc(Image.contrast) # In[46]: show_doc(Image.brightness) # In[47]: show_doc(Image.flip_lr) # In[48]: show_doc(Image.pad) # In[49]: show_doc(Image.pixel) # In[50]: show_doc(Image.zoom) # In[51]: show_doc(Image.dihedral) # In[52]: show_doc(ImageSegment.refresh) # In[53]: show_doc(Image.jitter) # In[54]: show_doc(Image.squish) # In[55]: show_doc(Image.skew) # In[56]: show_doc(Image.perspective_warp) # In[57]: show_doc(Image.zoom_squish) # In[58]: show_doc(Image.crop) # In[59]: show_doc(Image.tilt) # In[60]: show_doc(Image.rotate) # In[61]: show_doc(ImageSegment.lighting) # In[62]: show_doc(Image.symmetric_warp) # ## New Methods - Please document or move to the undocumented section # # In[63]: show_doc(Image.dihedral_affine) # # In[64]: show_doc(ImagePoints.pixel) # # In[65]: show_doc(ImageBBox.clone) # # In[66]: show_doc(ImagePoints.refresh) # # In[67]: show_doc(ImagePoints.coord) # # In[68]: show_doc(Image.set_sample) # # In[69]: show_doc(ImageSegment.show) # # In[70]: show_doc(ImagePoints.show) # # In[71]: show_doc(ImagePoints.clone) # # In[72]: show_doc(ImagePoints.lighting) # # In[73]: show_doc(Transform.calc) # # In[74]: show_doc(Image.flip_affine) # # In[75]: show_doc(ImageBBox.show) # # In[76]: show_doc(ImagePoints.resize) #