Tutorial: PyTorch Best Commands
Using PyTorch 1.2, torchaudio 0.3, torchtext 0.4, and torchvision 0.4.
Vision

# datasets
import torchvision
import torchvision.datasets as vDatasets
vDatasets.MNIST
vDatasets.FashionMNIST
vDatasets.KMNIST
vDatasets.EMNIST
vDatasets.QMNIST
vDatasets.FakeData # randomly generated images
vDatasets.CocoCaptions
vDatasets.CocoDetection
vDatasets.LSUN
vDatasets.ImageFolder # data loader for a certain image folder structure
vDatasets.DatasetFolder # data loader for a certain folder structure
vDatasets.ImageNet
vDatasets.CIFAR10 # also CIFAR100
vDatasets.STL10
vDatasets.SVHN
vDatasets.PhotoTour
vDatasets.SBU
vDatasets.Flickr8k # also Flickr30k
vDatasets.VOCDetection # also VOCSegmentation
vDatasets.Cityscapes
vDatasets.SBDataset
vDatasets.USPS
vDatasets.Kinetics400
vDatasets.HMDB51
vDatasets.UCF101

# video IO
import torchvision.io as vIO
vIO.read_video('file', start_pts, end_pts)
vIO.write_video('file', video, fps, video_codec)
torchvision.utils.save_image(image, 'file')

# classification
import torchvision.models as vModels
vModels.alexnet(pretrained=True)
vModels.densenet121()
vModels.densenet161()
vModels.densenet169()
vModels.densenet201()
vModels.googlenet()
vModels.inception_v3()
vModels.mnasnet0_5()
vModels.mnasnet0_75()
vModels.mnasnet1_0()
vModels.mnasnet1_3()
vModels.mobilenet_v2()
vModels.resnet18()
vModels.resnet34()
vModels.resnet50()
vModels.resnext50_32x4d()
vModels.resnet101()
vModels.resnext101_32x8d()
vModels.resnet152()
vModels.wide_resnet50_2()
vModels.wide_resnet101_2()
vModels.shufflenet_v2_x0_5()
vModels.shufflenet_v2_x1_0()
vModels.shufflenet_v2_x1_5()
vModels.shufflenet_v2_x2_0()
vModels.squeezenet1_0()
vModels.squeezenet1_1()
vModels.vgg11()
vModels.vgg11_bn()
vModels.vgg13()
vModels.vgg13_bn()
vModels.vgg16()
vModels.vgg16_bn()
vModels.vgg19()
vModels.vgg19_bn()
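As a quick sanity check, the dataset and model entries above combine as follows; the './data' download folder is an arbitrary choice, not part of the API:

import torchvision.datasets as vDatasets
import torchvision.models as vModels

# download MNIST into a local folder ('./data' is an arbitrary path)
mnist = vDatasets.MNIST('./data', train=True, download=True)

# load a ResNet-18 pretrained on ImageNet and switch it to inference mode
model = vModels.resnet18(pretrained=True)
model.eval()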
# semantic segmentation
vModels.segmentation.deeplabv3_resnet101()

# object and/or keypoint detection, instance segmentation
vModels.detection.fasterrcnn_resnet50_fpn()
vModels.detection.maskrcnn_resnet50_fpn()
vModels.detection.keypointrcnn_resnet50_fpn()

# video classification
vModels.video.r3d_18()
vModels.video.mc3_18()
vModels.video.r2plus1d_18()

# transforms
import torchvision.transforms as vTransforms
vTransforms.Compose(transforms) # chaining transforms
vTransforms.Lambda(someLambdaFunction)

# transforms on PIL images
vTransforms.CenterCrop((height, width))
vTransforms.ColorJitter(brightness=0, contrast=0, saturation=0, hue=0)
vTransforms.FiveCrop
vTransforms.Grayscale
vTransforms.Pad
vTransforms.RandomAffine(degrees, translate=None, scale=None, shear=None, resample=False, fillcolor=0)
vTransforms.RandomApply(transforms, p=0.5)
vTransforms.RandomChoice(transforms)
vTransforms.RandomCrop
vTransforms.RandomGrayscale
vTransforms.RandomHorizontalFlip
vTransforms.RandomOrder
vTransforms.RandomPerspective
vTransforms.RandomResizedCrop
vTransforms.RandomRotation
vTransforms.RandomSizedCrop # deprecated alias of RandomResizedCrop
vTransforms.RandomVerticalFlip
vTransforms.Resize
vTransforms.Scale # deprecated alias of Resize
vTransforms.TenCrop

# transforms on torch tensors
vTransforms.LinearTransformation
vTransforms.Normalize
vTransforms.RandomErasing

# conversion
vTransforms.ToPILImage
vTransforms.ToTensor

# direct access to transform functions
import torchvision.transforms.functional as vTransformsF

# operators for computer vision
# (not yet supported by TorchScript)
import torchvision.ops as vOps
vOps.nms # non-maximum suppression (NMS)
vOps.roi_align # <=> vOps.RoIAlign
vOps.roi_pool # <=> vOps.RoIPool

Data loader

# classes and functions to represent datasets
from torch.utils.data import Dataset, DataLoader
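A minimal sketch of the usual input pipeline built from the pieces above: a Compose chain of transforms, a dataset, and a DataLoader. FakeData, the batch size, and the normalization constants are illustrative choices, not requirements:

import torchvision.datasets as vDatasets
import torchvision.transforms as vTransforms
from torch.utils.data import DataLoader

# chain PIL-image transforms, convert to a tensor, then normalize it
transform = vTransforms.Compose([
    vTransforms.Resize(256),
    vTransforms.CenterCrop(224),
    vTransforms.ToTensor(),
    vTransforms.Normalize(mean=[0.485, 0.456, 0.406],
                          std=[0.229, 0.224, 0.225]),
])

dataset = vDatasets.FakeData(transform=transform)  # randomly generated images
loader = DataLoader(dataset, batch_size=32, shuffle=True, num_workers=2)

for images, labels in loader:
    pass  # images: (32, 3, 224, 224) float tensor, labels: (32,) tensor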
Neural network

import torch.nn as nn

Activation functions

nn.AdaptiveLogSoftmaxWithLoss
nn.CELU
nn.ELU
nn.Hardshrink
nn.Hardtanh
nn.LeakyReLU
nn.LogSigmoid
nn.LogSoftmax
nn.MultiheadAttention
nn.PReLU
nn.ReLU
nn.ReLU6
nn.RReLU(lower, upper) # slope sampled from a uniform distribution
nn.SELU
nn.Sigmoid
nn.Softmax
nn.Softmax2d
nn.Softmin
nn.Softplus
nn.Softshrink
nn.Softsign
nn.Tanh
nn.Tanhshrink
nn.Threshold

# convolutional layers
nn.Conv{1,2,3}d
nn.ConvTranspose{1,2,3}d
nn.Fold
nn.Unfold

# pooling
nn.AdaptiveAvgPool{1,2,3}d
nn.AdaptiveMaxPool{1,2,3}d
nn.AvgPool{1,2,3}d
nn.MaxPool{1,2,3}d
nn.MaxUnpool{1,2,3}d

# recurrent layers
nn.GRU
nn.LSTM
nn.RNN
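A minimal sketch of how the layer classes above are combined in a custom nn.Module; the layer sizes and the nn.Linear head are illustrative assumptions (linear layers are not listed above):

import torch
import torch.nn as nn

class SmallCNN(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(1, 16, kernel_size=3, padding=1)
        self.relu = nn.ReLU()
        self.pool = nn.MaxPool2d(2)
        self.fc = nn.Linear(16 * 14 * 14, 10)

    def forward(self, x):
        x = self.pool(self.relu(self.conv(x)))  # conv -> ReLU -> 2x2 max pool
        x = torch.flatten(x, start_dim=1)       # flatten all but the batch dim
        return self.fc(x)

out = SmallCNN()(torch.randn(8, 1, 28, 28))     # -> shape (8, 10)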
Optimizer

import torch.optim as optim

# optimizers
optim.Optimizer # base class for all optimizers
optim.Adadelta
optim.Adagrad
optim.Adam
optim.AdamW # Adam with decoupled weight decay regularization
optim.Adamax
optim.ASGD # averaged stochastic gradient descent
optim.LBFGS
optim.RMSprop
optim.Rprop
optim.SGD
optim.SparseAdam # for sparse tensors

# learning rate scheduling
optim.lr_scheduler # schedulers such as StepLR, ExponentialLR, ReduceLROnPlateau

Functional

# function-based access to layers and operations, instead of the class-based API in torch.nn
import torch.nn.functional as F
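A minimal single-training-step sketch tying the optimizer list and the functional interface together; the toy nn.Linear model, the SGD hyperparameters, and the random batch are illustrative assumptions:

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

model = nn.Linear(10, 2)                      # toy model (nn.Linear assumed for brevity)
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

inputs = torch.randn(32, 10)                  # fake batch
targets = torch.randint(0, 2, (32,))          # fake integer labels

optimizer.zero_grad()                         # clear accumulated gradients
loss = F.cross_entropy(model(inputs), targets)
loss.backward()                               # backpropagate
optimizer.step()                              # update the parameters
scheduler.step()                              # advance the learning-rate schedule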
NumPy-like functions

Loading PyTorch and tensor basics

# loading PyTorch
import torch

# defining a tensor
torch.tensor(values)

# define the data type
torch.tensor(values, dtype=torch.int16)

# evenly/logarithmically spaced values (steps = number of points, not step size)
torch.linspace(startNumber, endNumber, steps)
torch.logspace(startNumber, endNumber, steps)

# random values with the same shape as another tensor
torch.randn_like(other_tensor)

# random permutation of the integers in [0, n)
torch.randperm(n)

# concatenate tensors along a dimension
torch.cat((tensor1, tensor2), dim)
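A short sketch of the tensor-creation and manipulation calls above; the values and shapes are arbitrary:

import torch

a = torch.tensor([1.0, 2.0, 3.0])   # from Python values
b = torch.linspace(0, 1, 3)         # 3 evenly spaced points in [0, 1]
c = torch.cat((a, b), dim=0)        # shape (6,)
idx = torch.randperm(6)             # random permutation of 0..5
shuffled = c[idx]                   # index with the permutation
noise = torch.randn_like(c)         # same shape and dtype as c, drawn from N(0, 1)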
# element-wise math
torch.round(tensor) # round to the nearest integer
torch.pow(tensor, exponent)

Math (not element-wise)

torch.argmax(tensor)
torch.argmin(tensor)
torch.max(tensor)
torch.min(tensor)
torch.mean(tensor)
torch.median(tensor)
torch.norm(tensor, p) # p-norm
torch.prod(tensor) # product of all elements
torch.std(tensor)
torch.sum(tensor)
torch.unique(tensor)
torch.var(tensor)
torch.cross(tensor1, tensor2)
torch.cartesian_prod(tensor1, tensor2, ...)
torch.einsum(equation, tensors)
torch.tensordot(tensor1, tensor2)
torch.cholesky(tensor)
torch.cholesky_inverse(tensor)
torch.dot(tensor1, tensor2)
torch.eig(tensor)
torch.inverse(tensor)
torch.det(tensor)
torch.pinverse(tensor) # pseudo-inverse

Other

torch.isinf(tensor)
torch.sort(tensor)
torch.fft(tensor, signal_ndim)
torch.ifft(tensor, signal_ndim)
torch.rfft(tensor, signal_ndim)
torch.irfft(tensor, signal_ndim)
torch.stft(tensor, n_fft)
torch.bincount(tensor)
torch.diagonal(tensor)
torch.flatten(tensor, start_dim)
torch.rot90(tensor)
torch.histc(tensor)
torch.trace(tensor)
torch.svd(tensor)

PyTorch C++ (aka libtorch)

// PyTorch header file(s)
#include <torch/script.h>

torch::jit::script::Module module;
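The C++ snippet above loads a serialized TorchScript module; a minimal Python-side sketch of producing such a file with torch.jit.trace (the ResNet-18 model and the file name are illustrative choices):

import torch
import torchvision.models as vModels

model = vModels.resnet18(pretrained=True)
model.eval()

# trace the model with an example input and serialize it for libtorch
traced = torch.jit.trace(model, torch.randn(1, 3, 224, 224))
traced.save('resnet18_traced.pt')

# in C++: torch::jit::script::Module module = torch::jit::load("resnet18_traced.pt");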