
PyTorch Cheat Sheet

Using PyTorch 1.2, torchaudio 0.3, torchtext 0.4, and torchvision 0.4.

General PyTorch and model I/O

# loading PyTorch
import torch

# cuda
import torch.cuda as tCuda # various functions and settings
torch.backends.cudnn.deterministic = True # deterministic ML?
torch.backends.cudnn.benchmark = False # deterministic ML?
torch.cuda.is_available() # check if CUDA is available
tensor.cuda() # moving tensor to gpu
tensor.cpu() # moving tensor to cpu
tensor.to(device) # copy tensor to a given device
torch.device('cuda') # or 'cuda:0', 'cuda:1' if multiple devices
torch.device('cpu') # default

# static computation graph/C++ export preparation
torch.jit.trace()
from torch.jit import script, trace
@script

# load and save a model
torch.save(model, 'model_file')
model = torch.load('model_file')
model.eval() # set to inference
torch.save(model.state_dict(), 'model_file') # only state dict
model = ModelClass()
model.load_state_dict(torch.load('model_file'))

# save to onnx
torch.onnx.export
torch.onnx.export_to_pretty_string

# load onnx model
import onnx
model = onnx.load('model.onnx')

# check model
onnx.checker.check_model(model)
Pre-trained models and domain-specific utils

Audio

import torchaudio

# load and save audio
stream, sample_rate = torchaudio.load('file')
torchaudio.save('file', stream, sample_rate)
# 16 bit wav files only
stream, sample_rate = torchaudio.load_wav('file')

# datasets (can be used with torch.utils.data.DataLoader)
import torchaudio.datasets as aDatasets
aDatasets.YESNO('folder_for_storage', download=True)
aDatasets.VCTK('folder_for_storage', download=True)

# transforms
import torchaudio.transforms as aTransforms
aTransforms.AmplitudeToDB
aTransforms.MelScale
aTransforms.MelSpectrogram
aTransforms.MFCC
aTransforms.MuLawEncoding
aTransforms.MuLawDecoding
aTransforms.Resample
aTransforms.Spectrogram

# kaldi support
import torchaudio.compliance.kaldi as aKaldi
import torchaudio.kaldi_io as aKaldiIO
aKaldi.spectrogram
aKaldi.fbank
aKaldi.mfcc
aKaldi.resample_waveform
aKaldiIO.read_vec_int_ark
aKaldiIO.read_vec_flt_scp
aKaldiIO.read_vec_flt_ark
aKaldiIO.read_mat_scp
aKaldiIO.read_mat_ark

# functional/direct function access
import torchaudio.functional as aFunctional

# sox effects/passing data between Python and C++
import torchaudio.sox_effects as aSox_effects
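A short sketch of the load-and-transform flow, assuming a file audio.wav exists on disk:

import torchaudio
import torchaudio.transforms as aTransforms

waveform, sample_rate = torchaudio.load('audio.wav')  # tensor of shape [channels, frames]
mel = aTransforms.MelSpectrogram(sample_rate)(waveform)  # mel spectrogram of the waveform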
Text

import torchtext

# various data-related functions and classes
import torchtext.data as tData
tData.Batch
tData.Dataset
tData.Example
tData.TabularDataset
tData.RawField
tData.Field
tData.ReversibleField
tData.SubwordField
tData.NestedField
tData.Iterator
tData.BucketIterator
tData.BPTTIterator
tData.Pipeline # similar to vTransforms and sklearn's pipeline
tData.batch # function
tData.pool # function

# datasets
import torchtext.datasets as tDatasets
# sentiment analysis
tDatasets.SST
tDatasets.IMDB
tDatasets.TextClassificationDataset # subclass of all datasets below
tDatasets.AG_NEWS
tDatasets.SogouNews
tDatasets.DBpedia
tDatasets.YelpReviewPolarity
tDatasets.YelpReviewFull
tDatasets.YahooAnswers
tDatasets.AmazonReviewPolarity
tDatasets.AmazonReviewFull
# question classification
tDatasets.TREC
# entailment
tDatasets.SNLI
tDatasets.MultiNLI
# language modeling
tDatasets.WikiText2
tDatasets.WikiText103
tDatasets.PennTreebank
# machine translation
tDatasets.TranslationDataset # subclass
tDatasets.Multi30k
tDatasets.IWSLT
tDatasets.WMT14
# sequence tagging
tDatasets.SequenceTaggingDataset # subclass
tDatasets.UDPOS
tDatasets.CoNLL2000Chunking
# question answering
tDatasets.BABI20

# vocabulary and pre-trained embeddings
import torchtext.vocab as tVocab
tVocab.Vocab # create a vocabulary
tVocab.SubwordVocab # create a subword vocabulary
tVocab.Vectors # word vectors
tVocab.GloVe # GloVe embeddings
tVocab.FastText # FastText embeddings
tVocab.CharNGram # character n-grams
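A sketch of the classic Field/BucketIterator workflow with IMDB (downloads the data and GloVe vectors on first run; LabelField comes from the same tData module):

import torchtext.data as tData
import torchtext.datasets as tDatasets

TEXT = tData.Field(lower=True)
LABEL = tData.LabelField()
train, test = tDatasets.IMDB.splits(TEXT, LABEL)    # download and split
TEXT.build_vocab(train, vectors='glove.6B.100d')    # attach GloVe vectors
LABEL.build_vocab(train)
train_iter, test_iter = tData.BucketIterator.splits((train, test), batch_size=32)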
Vision

import torchvision

# datasets
import torchvision.datasets as vDatasets
vDatasets.MNIST
vDatasets.FashionMNIST
vDatasets.KMNIST
vDatasets.EMNIST
vDatasets.QMNIST
vDatasets.FakeData # randomly generated images
vDatasets.CocoCaptions
vDatasets.CocoDetection
vDatasets.LSUN
vDatasets.ImageFolder # data loader for a certain image folder structure
vDatasets.DatasetFolder # data loader for a certain folder structure
vDatasets.ImageNet
vDatasets.CIFAR{10,100}
vDatasets.STL10
vDatasets.SVHN
vDatasets.PhotoTour
vDatasets.SBU
vDatasets.Flickr{8k,30k}
vDatasets.VOC{Segmentation,Detection}
vDatasets.Cityscapes
vDatasets.SBDataset
vDatasets.USPS
vDatasets.Kinetics400
vDatasets.HMDB51
vDatasets.UCF101

# video IO
import torchvision.io as vIO
vIO.read_video('file', start_pts, end_pts)
vIO.write_video('file', video, fps, video_codec)
torchvision.utils.save_image(image, 'file')

# pretrained models/model architectures
import torchvision.models as vModels
# models can be constructed with random weights ()
# or pretrained (pretrained=True)

# classification
vModels.alexnet(pretrained=True)
vModels.densenet121()
vModels.densenet161()
vModels.densenet169()
vModels.densenet201()
vModels.googlenet()
vModels.inception_v3()
vModels.mnasnet0_5()
vModels.mnasnet0_75()
vModels.mnasnet1_0()
vModels.mnasnet1_3()
vModels.mobilenet_v2()
vModels.resnet18()
vModels.resnet34()
vModels.resnet50()
vModels.resnext50_32x4d()
vModels.resnet101()
vModels.resnext101_32x8d()
vModels.resnet152()
vModels.wide_resnet50_2()
vModels.wide_resnet101_2()
vModels.shufflenet_v2_x0_5()
vModels.shufflenet_v2_x1_0()
vModels.shufflenet_v2_x1_5()
vModels.shufflenet_v2_x2_0()
vModels.squeezenet1_0()
vModels.squeezenet1_1()
vModels.vgg11()
vModels.vgg11_bn()
vModels.vgg13()
vModels.vgg13_bn()
vModels.vgg16()
vModels.vgg16_bn()
vModels.vgg19()
vModels.vgg19_bn()

# semantic segmentation
vModels.segmentation.fcn_resnet50()
vModels.segmentation.fcn_resnet101()
vModels.segmentation.deeplabv3_resnet50()
vModels.segmentation.deeplabv3_resnet101()

# object and/or keypoint detection, instance segmentation
vModels.detection.fasterrcnn_resnet50_fpn()
vModels.detection.maskrcnn_resnet50_fpn()
vModels.detection.keypointrcnn_resnet50_fpn()

# video classification
vModels.video.r3d_18()
vModels.video.mc3_18()
vModels.video.r2plus1d_18()
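For example, running a pretrained classifier on a dummy batch (weights are downloaded on first use):

import torch
import torchvision.models as vModels

model = vModels.resnet18(pretrained=True)
model.eval()                               # inference mode
batch = torch.rand(1, 3, 224, 224)         # placeholder input batch
with torch.no_grad():
    scores = model(batch)                  # shape [1, 1000] class scores
pred = scores.argmax(dim=1)                # predicted ImageNet class index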
# transforms
import torchvision.transforms as vTransforms
vTransforms.Compose(transforms) # chaining transforms
vTransforms.Lambda(someLambdaFunction)

# transforms on PIL images
vTransforms.CenterCrop(size) # int or (height, width)
vTransforms.ColorJitter(brightness=0, contrast=0, saturation=0, hue=0)
vTransforms.FiveCrop
vTransforms.Grayscale
vTransforms.Pad
vTransforms.RandomAffine(degrees, translate=None, scale=None, shear=None, resample=False, fillcolor=0)
vTransforms.RandomApply(transforms, p=0.5)
vTransforms.RandomChoice(transforms)
vTransforms.RandomCrop
vTransforms.RandomGrayscale
vTransforms.RandomHorizontalFlip
vTransforms.RandomOrder
vTransforms.RandomPerspective
vTransforms.RandomResizedCrop
vTransforms.RandomRotation
vTransforms.RandomSizedCrop
vTransforms.RandomVerticalFlip
vTransforms.Resize
vTransforms.Scale
vTransforms.TenCrop

# transforms on torch tensors
vTransforms.LinearTransformation
vTransforms.Normalize
vTransforms.RandomErasing

# conversion
vTransforms.ToPILImage
vTransforms.ToTensor

# direct access to transform functions
import torchvision.transforms.functional as vTransformsF

# operators for computer vision
# (not supported by TorchScript yet)
import torchvision.ops as vOps
vOps.nms # non-maximum suppression (NMS)
vOps.roi_align # <=> vOps.RoIAlign
vOps.roi_pool # <=> vOps.RoIPool
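A typical preprocessing pipeline chaining several of the transforms above (the normalization constants are the commonly used ImageNet statistics; pil_image is a placeholder PIL image):

import torchvision.transforms as vTransforms

preprocess = vTransforms.Compose([
    vTransforms.Resize(256),
    vTransforms.CenterCrop(224),
    vTransforms.ToTensor(),                       # PIL image -> float tensor in [0, 1]
    vTransforms.Normalize(mean=[0.485, 0.456, 0.406],
                          std=[0.229, 0.224, 0.225]),
])
# tensor_image = preprocess(pil_image)            # pil_image: any PIL.Image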
Data loader

# classes and functions to represent datasets
from torch.utils.data import Dataset, DataLoader
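A minimal custom Dataset sketch with synthetic data, fed to a DataLoader:

import torch
from torch.utils.data import Dataset, DataLoader

class MyDataset(Dataset):                     # hypothetical example dataset
    def __init__(self):
        self.data = torch.randn(100, 3)       # 100 samples, 3 features
        self.labels = torch.randint(0, 2, (100,))
    def __len__(self):
        return len(self.data)
    def __getitem__(self, idx):
        return self.data[idx], self.labels[idx]

loader = DataLoader(MyDataset(), batch_size=32, shuffle=True)
for batch, labels in loader:
    pass                                      # training step goes here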
Neural network

import torch.nn as nn

Activation functions

nn.AdaptiveLogSoftmaxWithLoss
nn.CELU
nn.ELU
nn.Hardshrink
nn.Hardtanh
nn.LeakyReLU
nn.LogSigmoid
nn.LogSoftmax
nn.MultiheadAttention
nn.PReLU
nn.ReLU
nn.ReLU6
nn.RReLU(lower, upper) # sampled from a uniform distribution
nn.SELU
nn.Sigmoid
nn.Softmax
nn.Softmax2d
nn.Softmin
nn.Softplus
nn.Softshrink
nn.Softsign
nn.Tanh
nn.Tanhshrink
nn.Threshold

Loss function

nn.BCELoss
nn.BCEWithLogitsLoss
nn.CosineEmbeddingLoss
nn.CrossEntropyLoss
nn.CTCLoss
nn.HingeEmbeddingLoss
nn.KLDivLoss
nn.L1Loss
nn.MarginRankingLoss
nn.MSELoss
nn.MultiLabelSoftMarginLoss
nn.MultiMarginLoss
nn.NLLLoss
nn.PoissonNLLLoss
nn.SmoothL1Loss
nn.SoftMarginLoss
nn.TripletMarginLoss
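A tiny example combining layers, an activation, and a loss (TinyNet is a hypothetical two-layer classifier):

import torch
import torch.nn as nn

class TinyNet(nn.Module):                  # hypothetical two-layer classifier
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(3, 16)
        self.fc2 = nn.Linear(16, 2)
    def forward(self, x):
        return self.fc2(torch.relu(self.fc1(x)))

criterion = nn.CrossEntropyLoss()          # expects raw scores + class indices
loss = criterion(TinyNet()(torch.randn(8, 3)), torch.randint(0, 2, (8,)))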
Optimizer

import torch.optim as optim

# general usage
optimizer = optim.Optimizer(...) # any of the optimizers below
optimizer.step() # step-wise update
scheduler = optim.lr_scheduler.Scheduler(...) # any of the schedulers below
scheduler.step()

# optimizers
optim.Optimizer # general optimizer class
optim.Adadelta
optim.Adagrad
optim.Adam
optim.AdamW # Adam with decoupled weight decay regularization
optim.Adamax
optim.ASGD # averaged stochastic gradient descent
optim.LBFGS
optim.RMSprop
optim.Rprop
optim.SGD
optim.SparseAdam # for sparse tensors

# learning rate schedulers
optim.lr_scheduler
optim.lr_scheduler.LambdaLR
optim.lr_scheduler.StepLR
optim.lr_scheduler.MultiStepLR
optim.lr_scheduler.ExponentialLR
optim.lr_scheduler.CosineAnnealingLR
optim.lr_scheduler.ReduceLROnPlateau
optim.lr_scheduler.CyclicLR
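A bare-bones training loop wiring an optimizer to a scheduler (the model and data are placeholders):

import torch
import torch.nn as nn
import torch.optim as optim

model = nn.Linear(3, 2)                         # stand-in for any nn.Module
inputs = torch.randn(8, 3)                      # placeholder data
targets = torch.randint(0, 2, (8,))
criterion = nn.CrossEntropyLoss()

optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

for epoch in range(30):
    optimizer.zero_grad()                       # clear old gradients
    loss = criterion(model(inputs), targets)
    loss.backward()                             # compute gradients
    optimizer.step()                            # update parameters
    scheduler.step()                            # decay the lr every 10 epochs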
Pre-defined layers/deep learning

# containers
nn.Module{ ,List,Dict}
nn.Parameter{List,Dict}
nn.Sequential

# linear layers
nn.Linear
nn.Bilinear
nn.Identity

# dropout layers
nn.AlphaDropout
nn.Dropout{ ,2d,3d}

# convolutional layers
nn.Conv{1,2,3}d
nn.ConvTranspose{1,2,3}d
nn.Fold
nn.Unfold

# pooling
nn.AdaptiveAvgPool{1,2,3}d
nn.AdaptiveMaxPool{1,2,3}d
nn.AvgPool{1,2,3}d
nn.MaxPool{1,2,3}d
nn.MaxUnpool{1,2,3}d

# recurrent layers
nn.GRU
nn.LSTM
nn.RNN

# padding layers
nn.ReflectionPad{1,2}d
nn.ReplicationPad{1,2,3}d
nn.ConstantPad{1,2,3}d

# normalization layers
nn.BatchNorm{1,2,3}d
nn.InstanceNorm{1,2,3}d

# transformer layers
nn.Transformer
nn.TransformerEncoder
nn.TransformerDecoder
nn.TransformerEncoderLayer
nn.TransformerDecoderLayer
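For example, a small CNN assembled with nn.Sequential from the layers above (sized for 1x28x28 inputs, e.g. MNIST):

import torch.nn as nn

net = nn.Sequential(
    nn.Conv2d(1, 8, kernel_size=3, padding=1),  # 1x28x28 -> 8x28x28
    nn.BatchNorm2d(8),
    nn.ReLU(),
    nn.MaxPool2d(2),                            # 8x28x28 -> 8x14x14
    nn.Flatten(),                               # -> 8*14*14 = 1568 features
    nn.Linear(8 * 14 * 14, 10),                 # 10 class scores
)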

Functional

# direct function access, rather than via classes (torch.nn)
import torch.nn.functional as F

Computational graph

# various functions and classes to use and manipulate
# automatic differentiation and the computational graph
import torch.autograd as autograd
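A minimal autograd example: gradients of y = x1^2 + x2^2 with respect to x:

import torch

x = torch.tensor([2.0, 3.0], requires_grad=True)
y = (x ** 2).sum()        # y = x1^2 + x2^2
y.backward()              # populate x.grad via autograd
print(x.grad)             # tensor([4., 6.]), i.e. dy/dx = 2x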
NumPy-like functions

Loading PyTorch and tensor basics

# loading PyTorch
import torch

# defining a tensor
torch.tensor((values))

# define data type
torch.tensor((values), dtype=torch.int16)

# converting a NumPy array to a PyTorch tensor
torch.from_numpy(numpyArray)

# create a tensor of zeros
torch.zeros((shape))
torch.zeros_like(other_tensor)

# create a tensor of ones
torch.ones((shape))
torch.ones_like(other_tensor)

# create an identity matrix
torch.eye(numberOfRows)

# create a tensor filled with the same value
torch.full((shape), value)
torch.full_like(other_tensor, value)

# create an empty tensor
torch.empty((shape))
torch.empty_like(other_tensor)

# create sequences
torch.arange(startNumber, endNumber, stepSize)
torch.linspace(startNumber, endNumber, numberOfSteps)
torch.logspace(startNumber, endNumber, numberOfSteps)

# concatenate tensors
torch.cat((tensors), axis)

# split tensors into sub-tensors
torch.split(tensor, splitSize)

# (un)squeeze tensor
torch.squeeze(tensor, dimension)
torch.unsqueeze(tensor, dim)

# reshape tensor
torch.reshape(tensor, shape)

# transpose tensor
torch.t(tensor) # 1D and 2D tensors
torch.transpose(tensor, dim0, dim1)
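A few of the creation and reshaping functions above in action:

import torch

a = torch.arange(6)               # tensor([0, 1, 2, 3, 4, 5])
b = torch.reshape(a, (2, 3))      # 2 x 3 matrix
c = torch.t(b)                    # 3 x 2 transpose
d = torch.cat((b, b), 0)          # 4 x 3, stacked along dim 0
e = torch.unsqueeze(a, 0)         # shape [1, 6]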
Random numbers

# set seed
torch.manual_seed(seed)

# generate a tensor with random numbers
# of interval [0,1)
torch.rand(size)
torch.rand_like(other_tensor)

# generate a tensor with random integer numbers
# of interval [lowerInt,higherInt)
torch.randint(lowerInt, higherInt, (tensor_shape))
torch.randint_like(other_tensor, lowerInt, higherInt)

# generate a tensor of random numbers drawn
# from a normal distribution (mean=0, var=1)
torch.randn((size))
torch.randn_like(other_tensor)

# random permutation of integers
# range [0,n)
torch.randperm(n)

Math (element-wise)

# basic operations
torch.abs(tensor)
torch.add(tensor, tensor2) # or tensor+scalar
torch.div(tensor, tensor2) # or tensor/scalar
torch.mul(tensor, tensor2) # or tensor*scalar
torch.sub(tensor, tensor2) # or tensor-scalar
torch.ceil(tensor)
torch.floor(tensor)
torch.remainder(tensor, divisor) # or torch.fmod()
torch.sqrt(tensor)

# trigonometric functions
torch.acos(tensor)
torch.asin(tensor)
torch.atan(tensor)
torch.atan2(tensor, tensor2)
torch.cos(tensor)
torch.cosh(tensor)
torch.sin(tensor)
torch.sinh(tensor)
torch.tan(tensor)
torch.tanh(tensor)

# exponentials and logarithms
torch.exp(tensor)
torch.expm1(tensor) # exp(input) - 1
torch.log(tensor)
torch.log10(tensor)
torch.log1p(tensor) # log(1 + input)
torch.log2(tensor)

# other
torch.erfc(tensor) # complementary error function
torch.erfinv(tensor) # inverse error function
torch.round(tensor) # round to nearest integer
torch.pow(tensor, exponent)
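Seeding makes the draws reproducible; element-wise functions then apply per element:

import torch

torch.manual_seed(0)              # reproducible draws
u = torch.rand(3)                 # uniform on [0, 1)
n = torch.randn(3)                # standard normal
s = torch.sqrt(torch.abs(n))      # element-wise chain
print(u + n, s)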
Math (not element-wise)

torch.argmax(tensor)
torch.argmin(tensor)
torch.max(tensor)
torch.min(tensor)
torch.mean(tensor)
torch.median(tensor)
torch.norm(tensor, norm)
torch.prod(tensor) # product of all elements
torch.std(tensor)
torch.sum(tensor)
torch.unique(tensor)
torch.var(tensor)
torch.cross(tensor1, tensor2)
torch.cartesian_prod(tensor1, tensor2, ...)
torch.einsum(equation, tensors)
torch.tensordot(tensor1, tensor2)
torch.cholesky(tensor)
torch.cholesky_inverse(tensor)
torch.dot(tensor1, tensor2)
torch.eig(tensor)
torch.inverse(tensor)
torch.det(tensor)
torch.pinverse(tensor) # pseudo-inverse
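A few reductions and linear-algebra calls in action:

import torch

m = torch.arange(6.0).reshape(2, 3)
print(torch.sum(m), torch.mean(m), torch.argmax(m))  # scalar reductions
v = torch.tensor([1.0, 2.0, 3.0])
print(torch.dot(v, v))                               # 14.0
print(torch.einsum('ij,j->i', m, v))                 # matrix-vector product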
Other

torch.isinf(tensor)
torch.sort(tensor)
torch.fft(tensor, signal_ndim)
torch.ifft(tensor, signal_ndim)
torch.rfft(tensor, signal_ndim)
torch.irfft(tensor, signal_ndim)
torch.stft(tensor, n_fft)
torch.bincount(tensor)
torch.diagonal(tensor)
torch.flatten(tensor, start_dim)
torch.rot90(tensor)
torch.histc(tensor)
torch.trace(tensor)
torch.svd(tensor)

PyTorch C++ (aka libtorch)

// PyTorch header file(s)
#include <torch/script.h>

torch::jit::script::Module module;
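On the Python side, a traced module can be serialized and later loaded from C++ with torch::jit::load (a sketch; resnet18 is just an example model):

# Python side: trace a model and serialize it for libtorch
import torch
import torchvision.models as vModels

model = vModels.resnet18(pretrained=True).eval()
traced = torch.jit.trace(model, torch.rand(1, 3, 224, 224))
traced.save('traced_resnet18.pt')   # load in C++ via torch::jit::load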

©Simon Wenkel (https://www.simonwenkel.com)


This PDF is licensed under the CC BY-SA 4.0 license.
