index with expr[idx] · DiffSharp/DiffSharp@b900373 · GitHub

Commit b900373

index with expr[idx]
1 parent f4ed7da commit b900373

38 files changed: 1222 additions, 1222 deletions
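The change is purely syntactic: F# 6 allows indexing without the leading dot, so every `expr.[idx]` in the codebase becomes `expr[idx]` with identical behavior. A minimal sketch of the two forms (the array and bindings below are illustrative, not part of the commit):

    // Both forms compile under F# 6; this commit standardizes on the dotless one.
    let xs = [| 10; 20; 30 |]
    let oldStyle = xs.[1]   // legacy indexing syntax, still accepted
    let newStyle = xs[1]    // F# 6 indexing syntax, same result (20)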

docs/quickstart.fsx

Lines changed: 1 addition & 1 deletion
@@ -61,7 +61,7 @@ open DiffSharp.Data
 let dataset = MNIST("../data", train=true, transform=id, n=10)
 
 // Inspect a single image and label
-let data, label = dataset.[7]
+let data, label = dataset[7]
 
 // Save image to file
 data.saveImage("test.png")

examples/rnn.fsx

Lines changed: 2 additions & 2 deletions
@@ -72,8 +72,8 @@ let validInterval = 100
 let start = System.DateTime.Now
 for epoch = 1 to epochs do
     for i, x, t in loader.epoch() do
-        let input = x.[*,..seqLen-2]
-        let target = t.[*,1..]
+        let input = x[*,..seqLen-2]
+        let target = t[*,1..]
         rnn.reset()
         languageModel.reverseDiff()
         let output = input --> languageModel
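The dotless form extends to slices, which is what the hunk above relies on: `x[*,..seqLen-2]` keeps every row and drops the last column, and `t[*,1..]` drops the first, the usual next-token shift for a language model. A small sketch with invented data (assumes DiffSharp is referenced; `dsharp.tensor` and the `*` full-range wildcard are the library's tensor-construction and slicing API):

    open DiffSharp
    // Illustrative 2x3 tensor; the values are made up.
    let x = dsharp.tensor [[1; 2; 3]; [4; 5; 6]]
    let allButLast = x[*, ..1]   // all rows, columns 0..1  -> [[1; 2]; [4; 5]]
    let shifted    = x[*, 1..]   // all rows, columns 1..end -> [[2; 3]; [5; 6]]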

examples/vae.fsx

Lines changed: 8 additions & 8 deletions
@@ -31,18 +31,18 @@ type VAE(xDim:int, zDim:int, ?hDims:seq<int>, ?nonlinearity:Tensor->Tensor, ?non
         else
             Array.append (Array.append [|xDim|] hDims) [|zDim|]
 
-    let enc = Array.append [|for i in 0..dims.Length-2 -> Linear(dims.[i], dims.[i+1])|] [|Linear(dims.[dims.Length-2], dims.[dims.Length-1])|]
-    let dec = [|for i in 0..dims.Length-2 -> Linear(dims.[i+1], dims.[i])|] |> Array.rev
+    let enc = Array.append [|for i in 0..dims.Length-2 -> Linear(dims[i], dims[i+1])|] [|Linear(dims[dims.Length-2], dims[dims.Length-1])|]
+    let dec = [|for i in 0..dims.Length-2 -> Linear(dims[i+1], dims[i])|] |> Array.rev
     do
         base.addModel([for m in enc -> box m])
         base.addModel([for m in dec -> box m])
 
     let encode x =
         let mutable x = x
         for i in 0..enc.Length-3 do
-            x <- nonlinearity <| enc.[i].forward(x)
-        let mu = enc.[enc.Length-2].forward(x)
-        let logVar = enc.[enc.Length-1].forward(x)
+            x <- nonlinearity <| enc[i].forward(x)
+        let mu = enc[enc.Length-2].forward(x)
+        let logVar = enc[enc.Length-1].forward(x)
         mu, logVar
 
     let sampleLatent mu (logVar:Tensor) =

@@ -53,8 +53,8 @@ type VAE(xDim:int, zDim:int, ?hDims:seq<int>, ?nonlinearity:Tensor->Tensor, ?non
     let decode z =
         let mutable h = z
         for i in 0..dec.Length-2 do
-            h <- nonlinearity <| dec.[i].forward(h)
-        nonlinearityLast <| dec.[dec.Length-1].forward(h)
+            h <- nonlinearity <| dec[i].forward(h)
+        nonlinearityLast <| dec[dec.Length-1].forward(h)
 
     member _.encodeDecode(x:Tensor) =
         let mu, logVar = encode (x.view([-1; xDim]))

@@ -75,7 +75,7 @@ type VAE(xDim:int, zDim:int, ?hDims:seq<int>, ?nonlinearity:Tensor->Tensor, ?non
         let normalize = defaultArg normalize true
         let xRecon, mu, logVar = m.encodeDecode x
         let loss = VAE.loss(xRecon, x, mu, logVar)
-        if normalize then loss / x.shape.[0] else loss
+        if normalize then loss / x.shape[0] else loss
 
     member _.sample(?numSamples:int) =
         let numSamples = defaultArg numSamples 1

src/DiffSharp.Backends.Reference/Reference.RawTensor.fs

Lines changed: 179 additions & 179 deletions
Large diffs are not rendered by default.

src/DiffSharp.Backends.Torch/Torch.RawTensor.fs

Lines changed: 58 additions & 58 deletions
@@ -132,18 +132,18 @@ type TorchRawTensor(tt: torch.Tensor, shape: Shape, dtype: Dtype, device: Device
     override t.GetItem(indexes:int[]) =
         Shape.checkCanIndex t.Shape indexes
         if t.Shape.Length = 0 then t.ToScalar()
-        else t.MakeLike(tt=tt.[indexes |> Array.map (fun v -> torch.TensorIndex.Single(int64 v))], shape=[||]).ToScalar()
+        else t.MakeLike(tt=tt[indexes |> Array.map (fun v -> torch.TensorIndex.Single(int64 v))], shape=[||]).ToScalar()
 
     override t.GetSlice(fullBounds:int[,]) =
         let n = fullBounds.GetLength(0)
         let newShape = Shape.checkCanGetSlice t.Shape fullBounds
 
         let indices =
             Array.init n (fun i ->
-                let start = fullBounds.[i,0]
-                let stop = fullBounds.[i,1] + 1
+                let start = fullBounds[i,0]
+                let stop = fullBounds[i,1] + 1
                 let len = stop - start
-                if fullBounds.[i,2] = 1 && len = 1 then
+                if fullBounds[i,2] = 1 && len = 1 then
                     torch.TensorIndex.Single(int64 start)
                 else
                     torch.TensorIndex.Slice(start=int64 start, stop=int64 stop))
@@ -164,27 +164,27 @@ type TorchRawTensor(tt: torch.Tensor, shape: Shape, dtype: Dtype, device: Device
         | Dtype.Int8 ->
             let data = tt.data<sbyte>()
             for i in 0 .. n-1 do
-                res <- combineHashes res (int32 data.[int64 i])
+                res <- combineHashes res (int32 data[int64 i])
         | Dtype.Byte ->
             let data = tt.data<byte>()
             for i in 0 .. n-1 do
-                res <- combineHashes res (int32 data.[int64 i])
+                res <- combineHashes res (int32 data[int64 i])
         | Dtype.Bool ->
             let data = tt.data<byte>()
             for i in 0 .. n-1 do
-                res <- combineHashes res (int32 data.[int64 i])
+                res <- combineHashes res (int32 data[int64 i])
         | Dtype.Int16 ->
             let data = tt.data<int16>()
             for i in 0 .. n-1 do
-                res <- combineHashes res (int32 data.[int64 i] )
+                res <- combineHashes res (int32 data[int64 i] )
         | Dtype.Int32 ->
             let data = tt.data<int32>()
             for i in 0 .. n-1 do
-                res <- combineHashes res (int32 data.[int64 i])
+                res <- combineHashes res (int32 data[int64 i])
         | Dtype.Int64 ->
             let data = tt.data<int64>()
             for i in 0 .. n-1 do
-                res <- combineHashes res (int32 data.[int64 i])
+                res <- combineHashes res (int32 data[int64 i])
         | Dtype.Float16 ->
             for i in 0 .. n-1 do
                 res <- combineHashes res (hash (tt.ReadCpuFloat16(int64 i)))
@@ -194,11 +194,11 @@ type TorchRawTensor(tt: torch.Tensor, shape: Shape, dtype: Dtype, device: Device
         | Dtype.Float32 ->
             let data = tt.data<single>()
             for i in 0 .. n-1 do
-                res <- combineHashes res (hash data.[int64 i])
+                res <- combineHashes res (hash data[int64 i])
         | Dtype.Float64 ->
             let data = tt.data<double>()
             for i in 0 .. n-1 do
-                res <- combineHashes res (hash data.[int64 i])
+                res <- combineHashes res (hash data[int64 i])
         res
 
     override t.Expand(newShape) =
@@ -222,12 +222,12 @@ type TorchRawTensor(tt: torch.Tensor, shape: Shape, dtype: Dtype, device: Device
         let tt = torchMoveTo tt Device.CPU
         match t.Shape with
         | [| |] -> tt.ToScalar() |> box
-        | [| d0 |] -> upcast Array.init<'T> d0 (fun i -> tt.[int64 i] |> conv)
-        | [| d0; d1 |] -> upcast Array2D.init<'T> d0 d1 (fun i j -> tt.[int64 i, int64 j] |> conv)
-        | [| d0; d1; d2 |] -> upcast Array3D.init<'T> d0 d1 d2 (fun i j k -> tt.[int64 i, int64 j, int64 k] |> conv)
-        | [| d0; d1; d2; d3 |] -> upcast Array4D.init<'T> d0 d1 d2 d3 (fun i j k l -> tt.[int64 i, int64 j, int64 k, int64 l] |> conv)
-        | [| d0; d1; d2; d3; d4 |] -> upcast Array5D.init<'T> d0 d1 d2 d3 d4 (fun i j k l m -> tt.[int64 i, int64 j, int64 k, int64 l, int64 m] |> conv)
-        | [| d0; d1; d2; d3; d4; d5 |] -> upcast Array6D.init<'T> d0 d1 d2 d3 d4 d5 (fun i j k l m n -> tt.[int64 i, int64 j, int64 k, int64 l, int64 m, int64 n] |> conv)
+        | [| d0 |] -> upcast Array.init<'T> d0 (fun i -> tt[int64 i] |> conv)
+        | [| d0; d1 |] -> upcast Array2D.init<'T> d0 d1 (fun i j -> tt[int64 i, int64 j] |> conv)
+        | [| d0; d1; d2 |] -> upcast Array3D.init<'T> d0 d1 d2 (fun i j k -> tt[int64 i, int64 j, int64 k] |> conv)
+        | [| d0; d1; d2; d3 |] -> upcast Array4D.init<'T> d0 d1 d2 d3 (fun i j k l -> tt[int64 i, int64 j, int64 k, int64 l] |> conv)
+        | [| d0; d1; d2; d3; d4 |] -> upcast Array5D.init<'T> d0 d1 d2 d3 d4 (fun i j k l m -> tt[int64 i, int64 j, int64 k, int64 l, int64 m] |> conv)
+        | [| d0; d1; d2; d3; d4; d5 |] -> upcast Array6D.init<'T> d0 d1 d2 d3 d4 d5 (fun i j k l m n -> tt[int64 i, int64 j, int64 k, int64 l, int64 m, int64 n] |> conv)
         | _ -> failwithf "Cannot get array for Tensor dimensions > 6. Consider slicing the Tensor. Shape: %A" t.Shape
 
     override t.ToValues() =
@@ -250,7 +250,7 @@ type TorchRawTensor(tt: torch.Tensor, shape: Shape, dtype: Dtype, device: Device
         let data = tt2.data<'T>()
         let res = Array.zeroCreate<'T> (int32 tt2.NumberOfElements)
         for i in 0 .. int32 tt2.NumberOfElements - 1 do
-            res.[i] <- data.[int64 i]
+            res[i] <- data[int64 i]
         res
 
     member t.ToRawData() : Array =
@@ -276,7 +276,7 @@ type TorchRawTensor(tt: torch.Tensor, shape: Shape, dtype: Dtype, device: Device
         let tts, shapes = tensors |> Array.map (fun t -> (t :?> TorchRawTensor).TorchTensor, t.Shape) |> Array.unzip
         let _n, _shape1, _shape2, newShape = Shape.checkCanStack shapes dim
         let result = torch.stack(tts, int64 dim)
-        (tensors.[0] :?> TorchRawTensor).MakeLike(result, newShape)
+        (tensors[0] :?> TorchRawTensor).MakeLike(result, newShape)
 
     override t.UnstackT(dim) =
         let shape = t.Shape
@@ -336,7 +336,7 @@ type TorchRawTensor(tt: torch.Tensor, shape: Shape, dtype: Dtype, device: Device
         let mutable res = tt
         let mutable c = 0
         for i in 0 .. t.Dim - 1 do
-            if shape.[i] = 1 && (dim = -1 || i = dim) then
+            if shape[i] = 1 && (dim = -1 || i = dim) then
                 res <- res.squeeze(int64 c)
             else
                 c <- c + 1
@@ -364,10 +364,10 @@ type TorchRawTensor(tt: torch.Tensor, shape: Shape, dtype: Dtype, device: Device
         let mutable res = tt
         for i=0 to dims-1 do
             let s = res.shape
-            s.[i] <- int64 outputShape.[i]
+            s[i] <- int64 outputShape[i]
             let resnew = t.ZerosLike(fromTorchShape s)
-            let indices = Array.init t.Shape.[i] id |> Array.map ((*) dilations.[i] >> int64)
-            let mutable d = TorchInt64TensorOps().CreateFromFlatArray(indices, shape=[|t.Shape.[i]|], device=t.Device)
+            let indices = Array.init t.Shape[i] id |> Array.map ((*) dilations[i] >> int64)
+            let mutable d = TorchInt64TensorOps().CreateFromFlatArray(indices, shape=[|t.Shape[i]|], device=t.Device)
             for _=0 to i-1 do
                 d <- d.UnsqueezeT(0)
             for _=i+1 to dims-1 do
@@ -381,7 +381,7 @@ type TorchRawTensor(tt: torch.Tensor, shape: Shape, dtype: Dtype, device: Device
         let outputShape = Shape.undilatedShape shape dilations
         let mutable res = tt
         for d in 0 .. dilations.Length - 1 do
-            res <- res.slice(int64 d, 0L, int64 shape.[d], int64 dilations.[d])
+            res <- res.slice(int64 d, 0L, int64 shape[d], int64 dilations[d])
         t.MakeLike(res, outputShape)
 
     override t.GatherT(dim:int, indices) =
@@ -497,20 +497,20 @@ type TorchRawTensor(tt: torch.Tensor, shape: Shape, dtype: Dtype, device: Device
         for i = t.Dim - 1 downto 0 do
             let (struct (values2, indexes)) = values.max(int64 i)
             values <- values2
-            idxs.[i] <- indexes
+            idxs[i] <- indexes
 
         for i = 0 to t.Dim - 1 do
-            let idx = idxs.[i]
+            let idx = idxs[i]
 
-            res.[i] <-
+            res[i] <-
                 match i with
                 | 0 -> idx.ToInt64()
-                | 1 -> idx.[res.[0]].ToInt64()
-                | 2 -> idx.[res.[0], res.[1]].ToInt64()
-                | 3 -> idx.[res.[0], res.[1], res.[2]].ToInt64()
-                | 4 -> idx.[res.[0], res.[1], res.[2], res.[3]].ToInt64()
-                | 5 -> idx.[res.[0], res.[1], res.[2], res.[3], res.[4]].ToInt64()
-                | 6 -> idx.[res.[0], res.[1], res.[2], res.[3], res.[4], res.[5]].ToInt64()
+                | 1 -> idx[res[0]].ToInt64()
+                | 2 -> idx[res[0], res[1]].ToInt64()
+                | 3 -> idx[res[0], res[1], res[2]].ToInt64()
+                | 4 -> idx[res[0], res[1], res[2], res[3]].ToInt64()
+                | 5 -> idx[res[0], res[1], res[2], res[3], res[4]].ToInt64()
+                | 6 -> idx[res[0], res[1], res[2], res[3], res[4], res[5]].ToInt64()
                 | _ -> failwith "MaxIndexT > 6d nyi for torch"
         res |> Array.map int32
@@ -535,20 +535,20 @@ type TorchRawTensor(tt: torch.Tensor, shape: Shape, dtype: Dtype, device: Device
         for i = t.Dim - 1 downto 0 do
             let (struct (values2, indexes)) = values.min(int64 i)
             values <- values2
-            idxs.[i] <- indexes
+            idxs[i] <- indexes
 
         for i = 0 to t.Dim - 1 do
-            let idx = idxs.[i]
+            let idx = idxs[i]
 
-            res.[i] <-
+            res[i] <-
                 match i with
                 | 0 -> idx.ToInt64()
-                | 1 -> idx.[res.[0]].ToInt64()
-                | 2 -> idx.[res.[0], res.[1]].ToInt64()
-                | 3 -> idx.[res.[0], res.[1], res.[2]].ToInt64()
-                | 4 -> idx.[res.[0], res.[1], res.[2], res.[3]].ToInt64()
-                | 5 -> idx.[res.[0], res.[1], res.[2], res.[3], res.[4]].ToInt64()
-                | 6 -> idx.[res.[0], res.[1], res.[2], res.[3], res.[4], res.[5]].ToInt64()
+                | 1 -> idx[res[0]].ToInt64()
+                | 2 -> idx[res[0], res[1]].ToInt64()
+                | 3 -> idx[res[0], res[1], res[2]].ToInt64()
+                | 4 -> idx[res[0], res[1], res[2], res[3]].ToInt64()
+                | 5 -> idx[res[0], res[1], res[2], res[3], res[4]].ToInt64()
+                | 6 -> idx[res[0], res[1], res[2], res[3], res[4], res[5]].ToInt64()
                 | _ -> failwith "MinIndexT > 6d nyi for torch"
         res |> Array.map int32
@@ -575,9 +575,9 @@ type TorchRawTensor(tt: torch.Tensor, shape: Shape, dtype: Dtype, device: Device
         let res = tt.clone()
         let mutable t1Slice = res // will share memory with res
         for d in 0 .. location.Length - 1 do
-            let len2 = expandedShape2.[d]
-            if location.[d] <> 0 || len2 <> shape1.[d] then
-                t1Slice <- t1Slice.narrow(int64 d, int64 location.[d], int64 len2)
+            let len2 = expandedShape2[d]
+            if location[d] <> 0 || len2 <> shape1[d] then
+                t1Slice <- t1Slice.narrow(int64 d, int64 location[d], int64 len2)
         t1Slice.add_(t2Expanded) |> ignore
         t1.MakeLike(res)
@@ -674,8 +674,8 @@ type TorchRawTensor(tt: torch.Tensor, shape: Shape, dtype: Dtype, device: Device
         | _ ->
             let (t1BatchPart, t1MatrixPart), (t2BatchPart, t2MatrixPart) = Shape.checkCanMatmul t1.Shape t2.Shape
             if t1BatchPart <> t2BatchPart then failwithf "Cannot matrix multiply raw tensors with shapes %A, %A - mismatch batching" t1.Shape t2.Shape
-            let t1rows = t1MatrixPart.[0]
-            let t2cols = t2MatrixPart.[1]
+            let t1rows = t1MatrixPart[0]
+            let t2cols = t2MatrixPart[1]
             let newShape = Array.append t1BatchPart [| t1rows; t2cols |]
             let result =
                 // "addmm for CUDA tensors only supports floating-point types. Try converting the tensors with .float()" | const char *
@@ -777,7 +777,7 @@ type TorchRawTensor(tt: torch.Tensor, shape: Shape, dtype: Dtype, device: Device
         //let batchSize, channels, _inputSize, _outputShape = Shape.computeMaxUnpool1d t1.Shape outputSize
         let t1X = t1.UnsqueezeT(2)
         let indicesX = indices.UnsqueezeT(2)
-        let resulttX = t1X.MaxUnpool2D(indicesX, [| outputSize.[0]; outputSize.[1]; 1; outputSize.[2] |])
+        let resulttX = t1X.MaxUnpool2D(indicesX, [| outputSize[0]; outputSize[1]; 1; outputSize[2] |])
         let resultt = resulttX.SqueezeT(2)
         resultt
@@ -789,7 +789,7 @@ type TorchRawTensor(tt: torch.Tensor, shape: Shape, dtype: Dtype, device: Device
 
         // note, LibTorch only wants the last two elements of the output size passsed in
         // "There should be exactly two elements (height, width) in output_size (max_unpooling2d_shape_check at ...)"
-        let outputSize = outputSize.[2..3]
+        let outputSize = outputSize[2..3]
 
         // TODO: consider switching to the torch::nn module for MaxUnpool2d
@@ -804,7 +804,7 @@ type TorchRawTensor(tt: torch.Tensor, shape: Shape, dtype: Dtype, device: Device
 
         // note, LibTorch only wants the last three elements of the output size passsed in
         // "There should be exactly three elements (depth, height, width) in output_size (max_unpooling3d_shape_check at ..\..\aten\src\ATen\native\MaxUnpooling.cpp:231)"
-        let outputSize = outputSize.[2..4]
+        let outputSize = outputSize[2..4]
 
         // NOTE: strides and padding must always be specified for torch::max_unpool3d C++ entry
         // TODO: consider switching to the torch::nn module for MaxUnpool
@@ -1124,9 +1124,9 @@ type TorchRawTensor(tt: torch.Tensor, shape: Shape, dtype: Dtype, device: Device
         let t2Expanded = t2.TorchTensor.expand(toTorchShape expandedShape2)
         let mutable t1Slice = tt // will share memory with res
         for d in 0 .. location.Length - 1 do
-            let len2 = expandedShape2.[d]
-            if location.[d] <> 0 || len2 <> shape1.[d] then
-                t1Slice <- t1Slice.narrow(int64 d, int64 location.[d], int64 len2)
+            let len2 = expandedShape2[d]
+            if location[d] <> 0 || len2 <> shape1[d] then
+                t1Slice <- t1Slice.narrow(int64 d, int64 location[d], int64 len2)
         t1Slice.add_(t2Expanded) |> ignore
 
     override _.SubInPlace(t2) = checkMutable(); tt.sub_(t2.TorchTensor) |> ignore
@@ -1246,7 +1246,7 @@ type TorchTensorOps<'T, 'T2>
         // torch.InitializeDevice(device.ToTorch) |> ignore
         let t =
             match shape with
-            | [| |] -> fromScalar(values.[0])
+            | [| |] -> fromScalar(values[0])
            | _ -> from (values, toTorchShape shape)
         let tt = torchMoveTo t device
         TorchRawTensor(tt, shape, dtype, device) :> RawTensor
@@ -1419,14 +1419,14 @@ type TorchBackendTensorStatics()
         let supported = Array.zeroCreate<int> 32
         let isSupported (deviceType: DiffSharp.DeviceType) =
             let n = int deviceType
-            match supported.[n] with
+            match supported[n] with
             | 0 ->
                 try
                     torch.empty([| 1L |], device= torch.Device(deviceType.ToTorch, index=0)) |> ignore
-                    supported.[n] <- 1
+                    supported[n] <- 1
                     true
                 with _ ->
-                    supported.[n] <- 2
+                    supported[n] <- 2
                     false
             | 1 -> true
             | _ -> false
